Posted to commits@bigtop.apache.org by rv...@apache.org on 2017/03/22 06:09:52 UTC

[01/52] bigtop git commit: Working around ODPI-186

Repository: bigtop
Updated Branches:
  refs/heads/master 0aeea9710 -> 0d3448b81


http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
new file mode 100755
index 0000000..f5acb11
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,499 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+# Python Imports
+import os
+
+# Ambari Common and Resource Management Imports
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
+
+# Local Imports
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def yarn(name = None):
+  import params
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            mode='f',
+            configuration_attributes=params.config['configuration_attributes']['yarn-site']
+  )
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+
+  if name in params.service_map:
+    service_name = params.service_map[name]
+
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.yarn_user,
+                  password = Script.get_password(params.yarn_user))
+
+def create_log_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0775,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+  )
+  
+def create_local_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0755,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
+  )
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def yarn(name=None, config_dir=None):
+  """
+  :param name: Component name: apptimelineserver, historyserver, nodemanager, resourcemanager, or None (client defaults)
+  :param config_dir: Config directory to write configs to; this may differ during a rolling upgrade.
+  """
+  import params
+
+  if config_dir is None:
+    config_dir = params.hadoop_conf_dir
+
+  if name == "historyserver":
+    if params.yarn_log_aggregation_enabled:
+      params.HdfsResource(params.yarn_nm_app_log_dir,
+                           action="create_on_execute",
+                           type="directory",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0777,
+                           recursive_chmod=True
+      )
+
+    # create the /tmp folder with proper permissions if it doesn't exist yet
+    if params.entity_file_history_directory.startswith('/tmp'):
+        params.HdfsResource(params.hdfs_tmp_dir,
+                            action="create_on_execute",
+                            type="directory",
+                            owner=params.hdfs_user,
+                            mode=0777,
+        )
+
+    params.HdfsResource(params.entity_file_history_directory,
+                           action="create_on_execute",
+                           type="directory",
+                           owner=params.yarn_user,
+                           group=params.user_group
+    )
+    params.HdfsResource("/mapred",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user
+    )
+    params.HdfsResource("/mapred/system",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user
+    )
+    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         change_permissions_for_parents=True,
+                         mode=0777
+    )
+    params.HdfsResource(None, action="execute")
+    Directory(params.jhs_leveldb_state_store_dir,
+              owner=params.mapred_user,
+              group=params.user_group,
+              create_parents = True,
+              cd_access="a",
+              recursive_ownership = True,
+              )
+
+  #<editor-fold desc="Node Manager Section">
+  if name == "nodemanager":
+
+    # First start after enabling/disabling security
+    if params.toggle_nm_security:
+      Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
+                action='delete'
+      )
+
+      # If yarn.nodemanager.recovery.dir exists, remove this dir
+      if params.yarn_nodemanager_recovery_dir:
+        Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+                  action='delete'
+        )
+
+      # Setting NM marker file
+      if params.security_enabled:
+        Directory(params.nm_security_marker_dir)
+        File(params.nm_security_marker,
+             content="Marker file to track the first start after enabling/disabling security. "
+                     "During the first start, YARN local and log dirs are removed and recreated"
+             )
+      elif not params.security_enabled:
+        File(params.nm_security_marker, action="delete")
+
+
+    if not params.security_enabled or params.toggle_nm_security:
+      # handle_mounted_dirs ensures that we don't create dirs that are temporarily unavailable (unmounted) and intended to reside on a different mount.
+      nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
+      # create a history file used by handle_mounted_dirs
+      File(params.nm_log_dir_to_mount_file,
+           owner=params.hdfs_user,
+           group=params.user_group,
+           mode=0644,
+           content=nm_log_dir_to_mount_file_content
+      )
+      nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
+      File(params.nm_local_dir_to_mount_file,
+           owner=params.hdfs_user,
+           group=params.user_group,
+           mode=0644,
+           content=nm_local_dir_to_mount_file_content
+      )
+  #</editor-fold>
+
+  if params.yarn_nodemanager_recovery_dir:
+    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents = True,
+              mode=0755,
+              cd_access = 'a',
+    )
+
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access = 'a',
+  )
+
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access = 'a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents = True,
+            ignore_failures=True,
+            cd_access = 'a',
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  # During a rolling upgrade (RU), core masters and slaves need hdfs-site.xml.
+  # TODO: instead of specifying individual configs, which is susceptible to breaking when new configs are added,
+  # RU should rely on all configs available in <stack-root>/<version>/hadoop/conf
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig("hdfs-site.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=0644
+    )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  if name == 'resourcemanager':
+    Directory(params.rm_nodes_exclude_dir,
+         mode=0755,
+         create_parents=True,
+         cd_access='a',
+    )
+    File(params.rm_nodes_exclude_path,
+         owner=params.yarn_user,
+         group=params.user_group
+    )
+    File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+    )
+    if (not is_empty(params.node_label_enable) and params.node_label_enable) or (is_empty(params.node_label_enable) and params.node_labels_dir):
+      params.HdfsResource(params.node_labels_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           change_permissions_for_parents=True,
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0700
+      )
+      params.HdfsResource(None, action="execute")
+
+
+  elif name == 'apptimelineserver':
+    Directory(params.ats_leveldb_dir,
+       owner=params.yarn_user,
+       group=params.user_group,
+       create_parents = True,
+       cd_access="a",
+    )
+
+    # if the stack supports the application timeline-service state store property (timeline_state_store stack feature)
+    if params.stack_supports_timeline_state_store:
+      Directory(params.ats_leveldb_state_store_dir,
+       owner=params.yarn_user,
+       group=params.user_group,
+       create_parents = True,
+       cd_access="a",
+      )
+    # app timeline server 1.5 directories
+    if not is_empty(params.entity_groupfs_store_dir):
+      parent_path = os.path.dirname(params.entity_groupfs_store_dir)
+      params.HdfsResource(parent_path,
+                          type="directory",
+                          action="create_on_execute",
+                          change_permissions_for_parents=True,
+                          owner=params.yarn_user,
+                          group=params.user_group,
+                          mode=0755
+                          )
+      params.HdfsResource(params.entity_groupfs_store_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.yarn_user,
+                          group=params.user_group,
+                          mode=params.entity_groupfs_store_dir_mode
+                          )
+    if not is_empty(params.entity_groupfs_active_dir):
+      parent_path = os.path.dirname(params.entity_groupfs_active_dir)
+      params.HdfsResource(parent_path,
+                          type="directory",
+                          action="create_on_execute",
+                          change_permissions_for_parents=True,
+                          owner=params.yarn_user,
+                          group=params.user_group,
+                          mode=0755
+                          )
+      params.HdfsResource(params.entity_groupfs_active_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.yarn_user,
+                          group=params.user_group,
+                          mode=params.entity_groupfs_active_dir_mode
+                          )
+    params.HdfsResource(None, action="execute")
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(os.path.join(config_dir, "yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=InlineTemplate(params.yarn_env_sh_template)
+  )
+
+  container_executor = format("{yarn_container_bin}/container-executor")
+  File(container_executor,
+      group=params.yarn_executor_container_group,
+      mode=params.container_executor_mode
+  )
+
+  File(os.path.join(config_dir, "container-executor.cfg"),
+      group=params.user_group,
+      mode=0644,
+      content=Template('container-executor.cfg.j2')
+  )
+
+  Directory(params.cgroups_dir,
+            group=params.user_group,
+            create_parents = True,
+            mode=0755,
+            cd_access="a")
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  File(os.path.join(config_dir, "mapred-env.sh"),
+       owner=tc_owner,
+       mode=0755,
+       content=InlineTemplate(params.mapred_env_sh_template)
+  )
+
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    File(os.path.join(config_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(config_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if "ssl-client" in params.config['configurations']:
+    XmlConfig("ssl-client.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    Directory(params.hadoop_conf_secure_dir,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              cd_access='a',
+              )
+
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_secure_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-server" in params.config['configurations']:
+    XmlConfig("ssl-server.xml",
+              conf_dir=config_dir,
+              configurations=params.config['configurations']['ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
+    File(os.path.join(config_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(config_dir, 'ssl-client.xml.example')):
+    File(os.path.join(config_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(config_dir, 'ssl-server.xml.example')):
+    File(os.path.join(config_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
\ No newline at end of file
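
The handle_mounted_dirs() calls in the nodemanager section above record each YARN local/log dir together with the mount it was created on, and skip re-creating a dir whose mount is currently missing, so data dirs are not silently recreated on the root volume. The following is a rough, standalone sketch of that idea in plain Python; it is not the resource_management implementation, and the helper names and history-file format are illustrative assumptions.

    # Rough standalone sketch of the mount-protection idea (illustrative only; not
    # the resource_management implementation used by the script above).
    import os

    def get_mount_point(path):
        # Walk upward until we reach a mount point; '/' is always a mount point.
        path = os.path.abspath(path)
        while not os.path.ismount(path):
            path = os.path.dirname(path)
        return path

    def create_dirs_on_available_mounts(dir_list, history_file, create_fn):
        # Load the previously recorded "dir,mount" pairs, if the history file exists.
        history = {}
        if os.path.isfile(history_file):
            with open(history_file) as f:
                for line in f:
                    if "," in line:
                        d, m = line.strip().split(",", 1)
                        history[d] = m

        new_lines = []
        for d in dir_list:
            current_mount = get_mount_point(d)
            previous_mount = history.get(d)
            # If the dir used to live on a different mount that is not present now,
            # skip creation instead of silently recreating it on the root volume.
            if previous_mount and previous_mount != current_mount:
                continue
            create_fn(d)
            new_lines.append("%s,%s" % (d, get_mount_point(d)))

        # Return the new history content, mirroring how the script above writes
        # nm_log_dir_to_mount_file / nm_local_dir_to_mount_file via File(...).
        return "\n".join(new_lines)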

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
new file mode 100755
index 0000000..4d65a40
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,67 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from yarn import yarn
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class YarnClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class YarnClientWindows(YarnClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class YarnClientDefault(YarnClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  YarnClient().execute()
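
Both yarn.py and yarn_client.py select their implementation through the @OsFamilyFuncImpl / @OsFamilyImpl decorators from ambari_commons, which dispatch to a Windows or default (Linux) variant at runtime. The sketch below only illustrates that dispatch idea in plain Python; it is not the ambari_commons API, and the registry, family names, and platform check are simplified assumptions.

    # Simplified illustration of OS-family dispatch (not the ambari_commons API).
    import platform

    _IMPLS = {}

    def os_family_func_impl(os_family):
        # Register the decorated function under its OS family and return a
        # dispatcher that picks the matching implementation when called.
        def decorator(func):
            _IMPLS.setdefault(func.__name__, {})[os_family] = func
            def dispatcher(*args, **kwargs):
                family = "winsrv" if platform.system() == "Windows" else "default"
                impls = _IMPLS[func.__name__]
                impl = impls.get(family, impls.get("default"))
                return impl(*args, **kwargs)
            return dispatcher
        return decorator

    @os_family_func_impl("winsrv")
    def yarn_demo():
        return "Windows-specific configuration path"

    @os_family_func_impl("default")
    def yarn_demo():
        return "Default (Linux) configuration path"

    print(yarn_demo())  # prints the variant matching the current OS family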

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100755
index 0000000..c6f1ff6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id={{min_user_id}}
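
Within Ambari this template is rendered by the Template resource against the values defined in params.py (see the container-executor.cfg File resource in yarn.py above). As a standalone illustration of what the rendered file looks like, the snippet below uses the jinja2 package directly; the package dependency and the sample values are assumptions made for the example only.

    # Standalone rendering example (assumes the 'jinja2' package is installed);
    # Ambari itself resolves these placeholders via its Template resource and params.py.
    from jinja2 import Template

    template_source = """\
    yarn.nodemanager.local-dirs={{nm_local_dirs}}
    yarn.nodemanager.log-dirs={{nm_log_dirs}}
    yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
    banned.users=hdfs,yarn,mapred,bin
    min.user.id={{min_user_id}}
    """

    # The values below are placeholders, not the values any particular cluster uses.
    rendered = Template(template_source).render(
        nm_local_dirs="/hadoop/yarn/local",
        nm_log_dirs="/hadoop/yarn/log",
        yarn_executor_container_group="hadoop",
        min_user_id=1000,
    )
    print(rendered)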

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..c7ce416
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100755
index 0000000..ae8e6d5
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile {{mapred_user_nofile_limit}}
+{{mapred_user}}   - nproc  {{mapred_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
new file mode 100755
index 0000000..3d5f4f2
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
new file mode 100755
index 0000000..1063099
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile {{yarn_user_nofile_limit}}
+{{yarn_user}}   - nproc  {{yarn_user_nproc_limit}}
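
yarn.conf.j2 and mapreduce.conf.j2 above render limits.conf entries (written to {limits_conf_dir} by yarn.py) that raise the open-file (nofile) and process (nproc) limits for the YARN and MapReduce users. One way to confirm the limits seen by a newly started process is sketched below using Python's standard resource module (Linux only); this is a verification aid, not part of the deployment scripts.

    # Check the effective nofile/nproc limits for the current process (Linux only).
    # Note: limits.d changes apply to new login sessions, not already-running processes.
    import resource

    soft_nofile, hard_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)
    soft_nproc, hard_nproc = resource.getrlimit(resource.RLIMIT_NPROC)
    print("nofile: soft=%s hard=%s" % (soft_nofile, hard_nofile))
    print("nproc:  soft=%s hard=%s" % (soft_nproc, hard_nproc))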


[22/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
new file mode 100755
index 0000000..cddb624
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,2796 @@
+<configuration><property require-input="false">
+    <name>hive.default.fileformat.managed</name>
+    <value>TextFile</value>
+    <description>
+      Default file format for the CREATE TABLE statement, applied to managed tables only.
+      External tables will be created with the default file format. Leaving this null
+      will result in using the default file format for all tables.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>datanucleus.rdbms.datastoreAdapterClassName</name>
+    <description>DataNucleus adapter class name. This property is used only when the Hive database is SQL Anywhere.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_database</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.minThreads</name>
+    <value>1</value>
+    <description>
+      Minimum number of threads maintained by Atlas hook.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.maxThreads</name>
+    <value>1</value>
+    <description>
+      Maximum number of threads used by Atlas hook.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>On</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>Off</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.compute.query.using.stats</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.stats.fetch.partition.stats</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.stats.fetch.column.stats</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.zookeeper.quorum</name>
+    <value>localhost:2181</value>
+    <description>List of ZooKeeper servers to talk to. This is needed for:
+      1. Read/write locks, when hive.lock.manager is set to
+      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
+      2. Service discovery for HiveServer2 via ZooKeeper.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>multiLine</type>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.connect.retries</name>
+    <value>24</value>
+    <description>Number of retries while opening a connection to metastore</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.failure.retries</name>
+    <value>24</value>
+    <description>Number of retries upon failure of Thrift metastore calls</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>5s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>1800s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description></description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    <description>
+      The Hive client authorization manager class name. The user-defined authorization class should implement the
+      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    <description>The delegation token store implementation.
+      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for a load-balanced cluster.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+    <value>localhost:2181</value>
+    <description>The ZooKeeper token store connect string.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>true</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself
+      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble specified in hive.zookeeper.quorum
+      in their connection string.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/hive</value>
+    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.submitviachild</name>
+    <value>false</value>
+    <description></description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.submit.local.task.via.child</name>
+    <value>true</value>
+    <description>
+      Determines whether local tasks (typically the mapjoin hashtable generation phase) run in
+      a separate JVM (true recommended) or not.
+      Running them in the same JVM avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.compress.output</name>
+    <value>false</value>
+    <description>
+      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed.
+      The compression codec and other options are determined from the Hadoop config variables mapred.output.compress*
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.compress.intermediate</name>
+    <value>false</value>
+    <description>
+      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>67108864</value>
+    <description>Defines the size per reducer. For example, if it is set to 64M, given 256M input size, 4 reducers will be used.</description>
+    <display-name>Data per Reducer</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>4294967296</maximum>
+        <minimum>64</minimum>
+        <unit>B</unit>
+        <increment-step></increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.reducers.max</name>
+    <value>1009</value>
+    <description>
+      Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
+      negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of pre-execution hooks to be invoked for each statement.
+      A pre-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_timeline_logging_enabled</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of post-execution hooks to be invoked for each statement.
+      A post-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>atlas.server.https.port</name>
+            <type>application-properties</type>
+        </property>
+        <property>
+            <name>atlas.server.http.port</name>
+            <type>application-properties</type>
+        </property>
+        <property>
+            <name>hive_timeline_logging_enabled</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of on-failure hooks to be invoked for each statement.
+      An on-failure hook is specified as the name of Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_timeline_logging_enabled</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.parallel</name>
+    <value>false</value>
+    <description>Whether to execute jobs in parallel</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.parallel.thread.number</name>
+    <value>8</value>
+    <description>How many jobs at most can be executed in parallel</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on. </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.dynamic.partition</name>
+    <value>true</value>
+    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.dynamic.partition.mode</name>
+    <value>nonstrict</value>
+    <description>
+      In strict mode, the user must specify at least one static partition,
+      in case the user accidentally overwrites all partitions.
+      In nonstrict mode, all partitions of a table may be dynamic.
+    </description>
+    <display-name>Allow all partitions to be Dynamic</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>nonstrict</value>
+                <label>On</label>
+            </entry>
+            <entry>
+                <value>strict</value>
+                <label>Off</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_txn_acid</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.max.dynamic.partitions</name>
+    <value>5000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
+    <value>2000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.max.created.files</name>
+    <value>100000</value>
+    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <description>password to use against metastore database</description>
+    <display-name>Database Password</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+        <type>password</type>
+        <overridable>false</overridable>
+        <hidden>HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD</hidden>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+    <display-name>Database URL</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_database</name>
+            <type>hive-env</type>
+        </property>
+        <property>
+            <name>ambari.hive.db.schema.name</name>
+            <type>hive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
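+    <!--
+      Illustrative only (hostname is a placeholder): for a MySQL metastore database on a
+      dedicated host the URL would look like
+        jdbc:mysql://db-host.example.com/hive?createDatabaseIfNotExist=true
+      and javax.jdo.option.ConnectionDriverName must match the chosen database.
+    -->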
+</property><property require-input="false">
+    <name>hive.metastore.server.max.threads</name>
+    <value>100000</value>
+    <description>Maximum number of worker threads in the Thrift server's pool.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>
+      The service principal for the metastore Thrift server.
+      The special string _HOST will be replaced automatically with the correct host name.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+    <value>/hive/cluster/delegation</value>
+    <description>The root path for token store data.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>List of comma separated listeners for metastore events.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.authorization.storage.checks</name>
+    <value>false</value>
+    <description>
+      Should the metastore do authorization checks against the underlying storage (usually hdfs)
+      for operations like drop-partition (disallow the drop-partition if the user in
+      question doesn't have permissions to delete the corresponding directory
+      on the storage).
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+    <display-name>JDBC Driver Class</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_database</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+    <display-name>Database Username</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>db_user</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.mapjoin.optimized.hashtable</name>
+    <value>true</value>
+    <description>
+      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
+      because memory-optimized hashtable cannot be serialized.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+    <value>0.9</value>
+    <description>
+      The max memory to be used by map-side group aggregation hash table.
+      If the memory usage is higher than this number, force to flush data
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.map.aggr.hash.min.reduction</name>
+    <value>0.5</value>
+    <description>
+      Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number.
+      Set to 1 to make sure hash aggregation is never turned off.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+    <description>Merge small files at the end of a map-only job</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a map-reduce job</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.tezfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a Tez DAG</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.size.per.task</name>
+    <value>256000000</value>
+    <description>Size of merged files at the end of the job</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16000000</value>
+    <description>
+      When the average output file size of a job is less than this number, Hive will start an additional
+      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
+      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.rcfile.block.level</name>
+    <value>true</value>
+    <description></description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.merge.orcfile.stripe.level</name>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
+      table with ORC file format, enabling this config will do stripe level fast merge
+      for small ORC files. Note that enabling this config will not honor padding tolerance
+      config (hive.exec.orc.block.padding.tolerance).
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>268435456</maximum>
+        <minimum>8388608</minimum>
+        <unit>B</unit>
+        <increment-step>8388608</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC file</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>ZLIB</value>
+                <label>zlib Compression Library</label>
+            </entry>
+            <entry>
+                <value>SNAPPY</value>
+                <label>Snappy Compression Library</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.orc.compute.splits.num.threads</name>
+    <value>10</value>
+    <description>How many threads orc should use to create splits in parallel.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>
+      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
+      If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.cpu.vcores</name>
+    <value>-1</value>
+    <description>By default Tez will ask for however many cpus map-reduce is configured to use per container. This can be used to override that value.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.log.level</name>
+    <value>INFO</value>
+    <description>
+      The log level to use for tasks executing as part of the DAG.
+      Used only if hive.tez.java.opts is used to configure Java options.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <display-name>Enforce bucketing</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_txn_acid</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for a sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass the criteria for a sort-merge join.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+    <value>false</value>
+    <description>
+      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
+      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
+      tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the
+      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
+      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
+      and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
+      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
+      if the complete small table can fit in memory, and a map-join can be performed.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.constant.propagation</name>
+    <value>true</value>
+    <description>Whether to enable constant propagation optimizer</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.metadataonly</name>
+    <value>true</value>
+    <description></description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.null.scan</name>
+    <value>true</value>
+    <description>Don't scan relations that are guaranteed not to generate any rows</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>Whether to try bucket mapjoin</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>
+      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+      This should always be set to true. Since it is a new feature, it has been made configurable.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>
+      Reduce deduplication merges two reduce sinks (RSs) by moving the key/partition/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-reducer MR job.
+      The optimization is automatically disabled if the resulting number of reducers would be less than the specified value.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.sort.dynamic.partition</name>
+    <value>false</value>
+    <description>
+      When enabled, the dynamic partitioning column will be globally sorted.
+      This way we can keep only one record writer open for each partition value
+      in the reducer thereby reducing the memory pressure on reducers.
+    </description>
+    <display-name>Sort Partitions Dynamically</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.stats.dbclass</name>
+    <value>fs</value>
+    <description>
+      Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
+      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.stats.fetch.partition.stats</name>
+    <value>true</value>
+    <description>
+      Annotation of operator tree with statistics information requires partition level basic
+      statistics like number of rows, data size and file size. Partition statistics are fetched from
+      metastore. Fetching partition statistics for each needed partition can be expensive when the
+      number of partitions is high. This flag can be used to disable fetching of partition statistics
+      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
+      and will estimate the number of rows from row schema.
+    </description>
+    <display-name>Fetch partition stats at compiler</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>On</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>Off</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive.cbo.enable</name>
+            <type>hive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>On</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>Off</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive.cbo.enable</name>
+            <type>hive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.zookeeper.client.port</name>
+    <value>2181</value>
+    <description>The port of ZooKeeper servers to talk to. If the list of ZooKeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.zookeeper.namespace</name>
+    <value>hive_zookeeper_namespace</value>
+    <description>The parent node under which all ZooKeeper nodes are created.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description></description>
+    <display-name>Transaction Manager</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+                <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
+            </entry>
+            <entry>
+                <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+                <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_txn_acid</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>
+      Maximum number of transactions that can be fetched in one call to open_txns().
+      Increasing this will decrease the number of delta files created when
+      streaming data into Hive.  But it will also increase the number of
+      open transactions at any given time, possibly impacting read performance.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.support.concurrency</name>
+    <value>false</value>
+    <description>
+      Support concurrency and use locks, needed for Transactions. Requires Zookeeper.
+    </description>
+    <display-name>Use Locking</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_txn_acid</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
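+    <!--
+      For reference, a sketch of the settings commonly changed together to enable ACID
+      transactions (illustrative, not the defaults shipped here):
+        hive.support.concurrency=true
+        hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
+        hive.enforce.bucketing=true
+        hive.exec.dynamic.partition.mode=nonstrict
+        hive.compactor.initiator.on=true (in the metastore instance running the compactor)
+    -->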
+</property><property require-input="false">
+    <name>hive.cli.print.header</name>
+    <value>false</value>
+    <description>
+      Whether to print the names of the columns in query output.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time before a given compaction in working state is declared a failure
+      and returned to the initiated state.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time between checks to see if any partitions need to be compacted.
+      This should be kept high because each check for compaction requires many calls against the NameNode.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.fetch.task.conversion</name>
+    <value>more</value>
+    <description>
+      Expects one of [none, minimal, more].
+      Some select queries can be converted to single FETCH task minimizing latency.
+      Currently the query should be single sourced not having any subquery and should not have
+      any aggregations or distincts (which incurs RS), lateral views and joins.
+      0. none : disable hive.fetch.task.conversion
+      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
+      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.fetch.task.conversion.threshold</name>
+    <value>1073741824</value>
+    <description>
+      Input threshold for applying hive.fetch.task.conversion. If target table is native, input length
+      is calculated by summation of file lengths. If it's not native, storage handler for the table
+      can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.fetch.task.aggr</name>
+    <value>false</value>
+    <description>
+      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
+      final aggregations in a single reduce task. If this is set to true, Hive delegates the final aggregation
+      stage to a fetch task, possibly decreasing the query time.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the Hive client authorization</description>
+    <display-name>Enable Authorization</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>
+      hive client authenticator manager class name. The user defined authenticator should implement
+      interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>
+      authorization manager class name to be used in the metastore for authorization.
+      The user defined authorization class should implement interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+    <display-name>Hive Authorization Manager</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.metastore.authorization.auth.reads</name>
+    <value>true</value>
+    <description>If this is true, metastore authorizer authorizes read actions on database, table</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.metastore.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+    <description>
+      authenticator manager class name to be used in the metastore for authentication.
+      The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.logging.operation.enabled</name>
+    <value>true</value>
+    <description>When true, HS2 will save operation logs</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.logging.operation.log.location</name>
+    <value>/tmp/hive/operation_logs</value>
+    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.thrift.http.port</name>
+    <value>10001</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.thrift.http.path</name>
+    <value>cliservice</value>
+    <description>Path component of URL endpoint when in HTTP mode.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
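+    <!--
+      Illustrative only (hostname is a placeholder): in HTTP transport mode a JDBC client
+      connects with a URL such as
+        jdbc:hive2://hs2-host.example.com:10001/default;transportMode=http;httpPath=cliservice
+    -->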
+</property><property require-input="false">
+    <name>hive.server2.thrift.port</name>
+    <value>10000</value>
+    <description>
+      TCP port number to listen on, default 10000.
+    </description>
+    <display-name>HiveServer2 Port</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
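+    <!--
+      Illustrative only (hostname is a placeholder): with the default binary transport,
+      Beeline connects with
+        beeline -u "jdbc:hive2://hs2-host.example.com:10000/default"
+    -->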
+</property><property require-input="false">
+    <name>hive.server2.thrift.sasl.qop</name>
+    <value>auth</value>
+    <description>
+      Expects one of [auth, auth-int, auth-conf].
+      Sasl QOP value; Set it to one of following values to enable higher levels of
+      protection for HiveServer2 communication with clients.
+      "auth" - authentication only (default)
+      "auth-int" - authentication plus integrity protection
+      "auth-conf" - authentication plus integrity and confidentiality protection
+      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.thrift.max.worker.threads</name>
+    <value>500</value>
+    <description>Maximum number of Thrift worker threads</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.allow.user.substitution</name>
+    <value>true</value>
+    <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      keytab file for SPNego principal, optional,
+      typical value would look like /etc/security/keytabs/spnego.service.keytab.
+      This keytab would be used by HiveServer2 when Kerberos security is enabled and
+      HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+      SPNego authentication would be honored only if valid
+      hive.server2.authentication.spnego.principal
+      and
+      hive.server2.authentication.spnego.keytab
+      are specified.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.authentication</name>
+    <value>NONE</value>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <display-name>HiveServer2 Authentication</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>NONE</value>
+                <label>None</label>
+            </entry>
+            <entry>
+                <value>LDAP</value>
+                <label>LDAP</label>
+            </entry>
+            <entry>
+                <value>KERBEROS</value>
+                <label>Kerberos</label>
+            </entry>
+            <entry>
+                <value>PAM</value>
+                <label>PAM</label>
+            </entry>
+            <entry>
+                <value>CUSTOM</value>
+                <label>Custom</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.server2.custom.authentication.class</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.authentication.kerberos.principal</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.authentication.kerberos.keytab</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.authentication.ldap.url</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.authentication.ldap.baseDN</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.authentication.pam.services</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      SPNego service principal, optional,
+      typical value would look like HTTP/_HOST@EXAMPLE.COM
+      SPNego service principal would be used by HiveServer2 when Kerberos security is enabled
+      and HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.table.type.mapping</name>
+    <value>CLASSIC</value>
+    <description>
+      Expects one of [classic, hive].
+      This setting reflects how HiveServer2 will report the table types for JDBC and other
+      client implementations that retrieve the available tables and supported table types
+      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
+      CLASSIC : More generic types like TABLE and VIEW
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description>
+      Set this to true for using SSL encryption in HiveServer2.
+    </description>
+    <display-name>Use SSL</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.conf.restricted.list</name>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    <description>Comma separated list of configuration options which are immutable at runtime</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.user.install.directory</name>
+    <value>/user/</value>
+    <description>
+      If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory",
+      it will upload the hive jar to "hive.user.install.directory/user.name"
+      and use it to run queries.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>
+      Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrespective of the memory pressure condition.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.prewarm.enabled</name>
+    <value>false</value>
+    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Hold Containers to Reduce Latency</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.prewarm.numcontainers</name>
+    <value>3</value>
+    <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Number of Containers Held</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>20</maximum>
+        <minimum>1</minimum>
+        <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.convert.join.bucket.mapjoin.tez</name>
+    <value>false</value>
+    <description>
+      Whether joins can be automatically converted to bucket map joins in hive
+      when tez is used as the execution engine.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.auto.reducer.parallelism</name>
+    <value>false</value>
+    <description>
+      Turn on Tez's auto reducer parallelism feature. When enabled, Hive will still estimate data sizes
+      and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
+      necessary.
+    </description>
+    <display-name>Allow dynamic numbers of reducers</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.max.partition.factor</name>
+    <value>2.0</value>
+    <description>When auto reducer parallelism is enabled, this factor will be used to over-partition data in shuffle edges.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.min.partition.factor</name>
+    <value>0.25</value>
+    <description>
+      When auto reducer parallelism is enabled, this factor will be used to put a lower limit on the number
+      of reducers that Tez specifies.
+    </description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.dynamic.partition.pruning</name>
+    <value>true</value>
+    <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
+    <display-name>Allow dynamic partition pruning</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
+    <value>1048576</value>
+    <description>Maximum size of events sent by processors in dynamic pruning. If this size is exceeded, no pruning will take place.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
+    <value>104857600</value>
+    <description>Maximum total data size of events in dynamic pruning.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.smb.number.waves</name>
+    <value>0.5</value>
+    <description>The number of waves in which to run the SMB join. This accounts for the cluster being partially occupied; ideally it should be 1 wave.</description>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used by the Hive Metastore</description>
+    <display-name>Database Name</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>database</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>javax.jdo.option.ConnectionURL</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of query execution.
+      The default value is false.
+    </description>
+    <display-name>Enable Vectorization and Map Vectorization</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>52428800</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of the sizes of the n-1 tables/partitions in an n-way join is smaller than this size, the join is directly
+      converted to a map join (there is no conditional task).
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <filename>hive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>17179869184</maximum>
+       

<TRUNCATED>
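
A quick worked example of the map-join rule in the hive.auto.convert.join.noconditionaltask.size description above (assuming hive.auto.convert.join.noconditionaltask is on; the table sizes are hypothetical and the 50 MB threshold is the default shown in the diff):

# Hypothetical inputs for a 3-way join.
threshold_bytes = 52428800                          # 50 MB default from the diff
table_sizes = [10 * 2**20, 30 * 2**20, 2 * 2**30]   # 10 MB, 30 MB, 2 GB

# Per the description: sum the n-1 smaller tables/partitions and compare
# against the threshold; the largest input is the one left to be streamed.
smaller_inputs = sorted(table_sizes)[:-1]
if sum(smaller_inputs) < threshold_bytes:            # 40 MB < 50 MB here
    print("converted directly to a map join (no conditional task)")
else:
    print("kept as a regular shuffle join")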

[27/52] bigtop git commit: Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
Add Hive 1.2 to ODPi ambari reference implementation

(cherry picked from commit e49f7e6b8bbf5650c36df0f2bfc7c4f7474ca5b2)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/b6475d77
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/b6475d77
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/b6475d77

Branch: refs/heads/master
Commit: b6475d7762ac57b9fe07edd48f3ea3fc2f05bdbd
Parents: ba8d7f5
Author: Sumit Mohanty <su...@gmail.com>
Authored: Wed Oct 26 17:46:24 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:09 2017 -0700

----------------------------------------------------------------------
 .../ambari/ODPi/1.0/role_command_order.json     |   1 +
 .../HIVE/configuration/beeline-log4j2.xml       |  62 --
 .../hive-atlas-application.properties.xml       |  67 --
 .../HIVE/configuration/hive-exec-log4j2.xml     |  83 --
 .../HIVE/configuration/hive-interactive-env.xml | 373 --------
 .../configuration/hive-interactive-site.xml     | 909 -------------------
 .../services/HIVE/configuration/hive-log4j2.xml |  90 --
 .../hiveserver2-interactive-site.xml            |  56 --
 .../HIVE/configuration/llap-cli-log4j2.xml      |  91 --
 .../HIVE/configuration/llap-daemon-log4j.xml    | 158 ----
 .../HIVE/configuration/ranger-hive-audit.xml    | 136 ---
 .../ranger-hive-plugin-properties.xml           |  63 --
 .../configuration/ranger-hive-policymgr-ssl.xml |  71 --
 .../HIVE/configuration/ranger-hive-security.xml |  81 --
 .../HIVE/configuration/tez-interactive-site.xml | 144 ---
 .../ambari/ODPi/1.0/services/HIVE/metainfo.xml  | 158 +---
 .../HIVE/package/scripts/params_linux.py        |  42 +-
 .../HIVE/package/scripts/status_params.py       |  14 +-
 .../services/YARN/configuration/yarn-site.xml   | 207 ++++-
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |  14 +
 20 files changed, 230 insertions(+), 2590 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
index 31f26e3..05beb76 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
@@ -5,6 +5,7 @@
     "_comment" : "dependencies for all cases",
     "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
     "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
     "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
deleted file mode 100755
index 03de64e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-status = INFO
-name = BeelineLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.hive.log.level = WARN
-property.hive.root.logger = console
-
-# list of all appenders
-appenders = console
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-
-# list of all loggers
-loggers = HiveConnection
-
-# HiveConnection logs useful info for dynamic service discovery
-logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
-logger.HiveConnection.level = INFO
-
-# root logger
-rootLogger.level = ${sys:hive.log.level}
-rootLogger.appenderRefs = root
-rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
-  </value>
-    <description>Custom beeline-log4j2.properties</description>
-    <display-name>beeline-log4j template</display-name>
-    <filename>beeline-log4j2.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
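
As an aside, a hedged sketch of how a "content"-typed property such as the beeline-log4j template above is typically turned into a file by an Ambari package script; the directory, ownership, and exact configuration keys below are placeholders rather than values taken from this commit.

import os

from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate


def write_beeline_log4j(config, conf_dir, owner, group):
    # Pull the raw template text out of the desired configuration; the
    # 'beeline-log4j2'/'content' keys mirror the property shown above.
    template_text = config['configurations']['beeline-log4j2']['content']

    # Render any {{...}} substitutions and write the result alongside the
    # other Hive client configs. Path and permissions are illustrative.
    File(os.path.join(conf_dir, "beeline-log4j2.properties"),
         owner=owner,
         group=group,
         mode=0o644,
         content=InlineTemplate(template_text))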

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
deleted file mode 100755
index 7eb72ef..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<configuration><property require-input="false">
-    <name>atlas.hook.hive.synchronous</name>
-    <value>false</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.numRetries</name>
-    <value>3</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.minThreads</name>
-    <value>5</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.maxThreads</name>
-    <value>5</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.keepAliveTime</name>
-    <value>10</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.queueSize</name>
-    <value>1000</value>
-    <description></description>
-    <filename>hive-atlas-application.properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
deleted file mode 100755
index c818d43..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-status = INFO
-name = HiveExecLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.hive.log.level = INFO
-property.hive.root.logger = FA
-property.hive.query.id = hadoop
-property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
-property.hive.log.file = ${sys:hive.query.id}.log
-
-# list of all appenders
-appenders = console, FA
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-
-# simple file appender
-appender.FA.type = File
-appender.FA.name = FA
-appender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
-appender.FA.layout.type = PatternLayout
-appender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-# list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
-
-logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
-logger.NIOServerCnxn.level = WARN
-
-logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
-logger.ClientCnxnSocketNIO.level = WARN
-
-logger.DataNucleus.name = DataNucleus
-logger.DataNucleus.level = ERROR
-
-logger.Datastore.name = Datastore
-logger.Datastore.level = ERROR
-
-logger.JPOX.name = JPOX
-logger.JPOX.level = ERROR
-
-# root logger
-rootLogger.level = ${sys:hive.log.level}
-rootLogger.appenderRefs = root
-rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
-  </value>
-    <description>Custom hive-exec-log4j2.properties</description>
-    <display-name>hive-exec-log4j2 template</display-name>
-    <filename>hive-exec-log4j2.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
deleted file mode 100755
index 7035283..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
+++ /dev/null
@@ -1,373 +0,0 @@
-<configuration><property require-input="false">
-    <name>enable_hive_interactive</name>
-    <value>false</value>
-    <description>Enable or disable interactive query in this cluster.</description>
-    <display-name>Enable Interactive Query (Tech Preview)</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <overridable>false</overridable>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>Yes</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>No</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.num.executors</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_heap_size</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.yarn.container.mb</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>capacity-scheduler</name>
-            <type>capacity-scheduler</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.io.memory.size</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>num_llap_nodes</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>slider_am_container_mb</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive_server_interactive_host</name>
-    <value>localhost</value>
-    <description>HiveServer2 Interactive Host</description>
-    <display-name>HiveServer2 Interactive Host</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap_queue_capacity</name>
-    <value>0</value>
-    <description>Percentage of the cluster dedicated to interactive query.</description>
-    <display-name>% of Cluster Capacity</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>100</maximum>
-        <minimum>20</minimum>
-        <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.llap.daemon.num.executors</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_heap_size</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.yarn.container.mb</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>capacity-scheduler</name>
-            <type>capacity-scheduler</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.io.memory.size</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>num_llap_nodes</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>slider_am_container_mb</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>num_llap_nodes</name>
-    <value>1</value>
-    <description>The number of Hive LLAP daemons to run.</description>
-    <display-name>Number of LLAP Daemons</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <minimum>1</minimum>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>num_retries_for_checking_llap_status</name>
-    <value>10</value>
-    <description>Number of times to retry checking the LLAP app status after it starts, before starting HiveServer2.</description>
-    <display-name>Number of retries while checking LLAP app status</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>20</maximum>
-        <minimum>0</minimum>
-        <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap_heap_size</name>
-    <value>0</value>
-    <description>Heap Size used by LLAP app.</description>
-    <display-name>LLAP heap size</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-site</type>
-        </property>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>slider_am_container_mb</name>
-    <value>341</value>
-    <description>Slider's app master container size in MB.</description>
-    <display-name>Slider AM container size</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <unit>MB</unit>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap_log_level</name>
-    <value>INFO</value>
-    <description>LLAP app logging level (WARN/INFO/DEBUG/TRACE)</description>
-    <display-name>LLAP app logging level (WARN/INFO/DEBUG/TRACE)</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap_app_name</name>
-    <value>llap0</value>
-    <description>LLAP app name</description>
-    <display-name>LLAP app name</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap_java_opts</name>
-    <value>-XX:+AlwaysPreTouch {% if java_version &gt; 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}</value>
-    <description>Java opts for llap application</description>
-    <display-name>LLAP app java opts</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>content</name>
-    <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
-
-      # The heap size of the jvm started by the hive shell script can be controlled via:
-
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
-
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
-
-
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
-
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
-
-      export METASTORE_PORT={{hive_metastore_port}}
-
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
-
-    </value>
-    <description>This is the jinja template for hive-env.sh file</description>
-    <display-name>hive-interactive-env template</display-name>
-    <filename>hive-interactive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
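
A standalone illustration (not Ambari code) of how the Jinja conditional embedded in the llap_java_opts value above resolves for different JDK majors; the template text is abbreviated to the branch structure and the java_version inputs are made up.

from jinja2 import Template

# Abbreviated copy of the llap_java_opts value; only the branching matters here.
opts_template = Template(
    "-XX:+AlwaysPreTouch "
    "{% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m"
    "{% else %}-XX:+PrintGCDetails -XX:+UseParallelGC{% endif %}"
)

print(opts_template.render(java_version=8))   # G1GC-based options on JDK 8+
print(opts_template.render(java_version=7))   # ParallelGC-based options on JDK 7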

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
deleted file mode 100755
index 27d3541..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
+++ /dev/null
@@ -1,909 +0,0 @@
-<configuration><property require-input="false">
-    <name>hive.server2.thrift.port</name>
-    <value>10500</value>
-    <description>
-      TCP port number to listen on, default 10500.
-    </description>
-    <display-name>HiveServer2 Port</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.http.port</name>
-    <value>10501</value>
-    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
-    <description>
-      The maximum number of queries the Hive Interactive cluster will be able to handle concurrently.
-    </description>
-    <display-name>Maximum Total Concurrent Queries</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>10</maximum>
-        <minimum>1</minimum>
-        <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.llap.daemon.num.executors</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_heap_size</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.yarn.container.mb</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.io.memory.size</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>num_llap_nodes</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>slider_am_container_mb</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.enable.doAs</name>
-    <value>false</value>
-    <description>
-      Setting this property to true will have HiveServer2 execute
-      Hive operations as the user making the calls to it.
-    </description>
-    <display-name>Run as end user instead of Hive user</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.prewarm.enabled</name>
-    <value>false</value>
-    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
-    <display-name>Hold Containers to Reduce Latency</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.vectorized.execution.reduce.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable vectorized mode of the reduce-side of
-      query execution.
-    </description>
-    <display-name>Enable Reduce Vectorization</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.tez.default.queues</name>
-    <value>default</value>
-    <description>
-      A list of comma separated values corresponding to YARN queues of the same name.
-      When HiveServer2 is launched in Tez mode, this configuration needs to be set
-      for multiple Tez sessions to run in parallel on the cluster.
-    </description>
-    <display-name>Default query queues</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>combo</type>
-        <entries>
-            <entry>
-                <value>default</value>
-                <label>Default</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1+</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.tez.initialize.default.sessions</name>
-    <value>true</value>
-    <description>
-      This flag is used in HiveServer2 to enable a user to use HiveServer2 without
-      turning on Tez for HiveServer2. The user could potentially want to run queries
-      over Tez without the pool of sessions.
-    </description>
-    <display-name>Start Tez session at Initialization</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.driver.parallel.compilation</name>
-    <value>true</value>
-    <description>
-      This flag allows HiveServer2 to compile queries in parallel.
-    </description>
-    <display-name>Compile queries in parallel</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.webui.port</name>
-    <value>10502</value>
-    <description>Web UI port address</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.webui.use.ssl</name>
-    <value>false</value>
-    <description>Enable SSL for HiveServer2 Interactive</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.zookeeper.namespace</name>
-    <value>hiveserver2-hive2</value>
-    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.queue.name</name>
-    <value>default</value>
-    <description>Choose the YARN queue in this cluster that is dedicated to interactive query.</description>
-    <display-name>Interactive Query Queue</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>combo</type>
-        <entries>
-            <entry>
-                <value>default</value>
-                <label>Default</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.server2.tez.default.queues</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.num.executors</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>llap_heap_size</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.daemon.yarn.container.mb</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.llap.io.memory.size</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>num_llap_nodes</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>slider_am_container_mb</name>
-            <type>hive-interactive-env</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.llap.daemon.yarn.shuffle.port</name>
-    <value>15551</value>
-    <description>YARN shuffle port for LLAP-daemon-hosted shuffle.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.execution.engine</name>
-    <value>tez</value>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.execution.mode</name>
-    <value>llap</value>
-    <description>Chooses whether query fragments will run in container or in llap</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.execution.mode</name>
-    <value>all</value>
-    <description>Chooses which fragments of a query will run in llap</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.io.enabled</name>
-    <value>true</value>
-    <description>Whether the LLAP IO layer is enabled.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive.llap.io.memory.size</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.io.use.lrfu</name>
-    <value>true</value>
-    <description>Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO).</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.auto.allow.uber</name>
-    <value>false</value>
-    <description>Whether or not to allow the planner to run vertices in the AM.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.object.cache.enabled</name>
-    <value>true</value>
-    <description>Cache objects (plans, hashtables, etc) in llap</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.input.generate.consistent.splits</name>
-    <value>true</value>
-    <description>Whether to generate consistent split locations when generating splits in the AM</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.client.consistent.splits</name>
-    <value>true</value>
-    <description>
-      Whether to set up split locations to match nodes on which llap daemons are running,
-      instead of using the locations provided by the split itself.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.task.scheduler.locality.delay</name>
-    <value>-1</value>
-    <description>
-      Amount of time to wait before allocating a request which contains location information,
-      to a location other than the ones requested. Set to -1 for an infinite delay, 0
-      for no delay.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.orc.split.strategy</name>
-    <value>HYBRID</value>
-    <description>
-      This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation
-      as opposed to query execution (split generation does not read or cache file footers).
-      ETL strategy is used when spending a little more time in split generation is acceptable
-      (split generation reads and caches file footers). HYBRID chooses between the above strategies
-      based on heuristics.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.service.hosts</name>
-    <value>@llap0</value>
-    <description>
-      Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,
-      YARN registry is used.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.allow.permanent.fns</name>
-    <value>false</value>
-    <description>Whether LLAP daemon should localize the resources for permanent UDFs.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.io.memory.size</name>
-    <value>0</value>
-    <description>The amount of memory reserved for Hive's optimized in-memory cache.</description>
-    <display-name>In-Memory Cache per Daemon</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <unit>MB</unit>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-site</type>
-        </property>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.llap.io.enabled</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.llap.daemon.num.executors</name>
-    <value>1</value>
-    <description>The maximum number of CPUs a single LLAP daemon will use. Usually this should be equal to the number of available CPUs.</description>
-    <display-name>Maximum CPUs per Daemon</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>tez.am.resource.memory.mb</name>
-            <type>tez-site</type>
-        </property>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.llap.io.threadpool.size</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.llap.daemon.vcpus.per.instance</name>
-    <value>${hive.llap.daemon.num.executors}</value>
-    <description>The total number of vcpus to use for the executors inside LLAP.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.yarn.container.mb</name>
-    <value>341</value>
-    <description>Total memory used by individual LLAP daemons. This includes memory for the cache as well as for the query execution.</description>
-    <display-name>Memory per daemon</display-name>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <unit>MB</unit>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap.shuffle.connection-keep-alive.enable</name>
-    <value>true</value>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>llap.shuffle.connection-keep-alive.timeout</name>
-    <value>60</value>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.io.threadpool.size</name>
-    <value>2</value>
-    <description>Specify the number of threads to use for low-level IO thread pool.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive.llap.daemon.num.executors</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.rpc.port</name>
-    <value>15001</value>
-    <description>The LLAP daemon RPC port.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.management.rpc.port</name>
-    <value>15004</value>
-    <description>RPC port for LLAP daemon management service.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.daemon.task.scheduler.enable.preemption</name>
-    <value>true</value>
-    <description>Whether the LLAP task scheduler is allowed to preempt already running, lower-priority task fragments when needed.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.exec.print.summary</name>
-    <value>true</value>
-    <description>Display breakdown of execution steps, for every query executed by the shell.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.vectorized.execution.mapjoin.native.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable native (i.e. non-pass through) vectorization
-      of queries using MapJoin.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.vectorized.execution.mapjoin.minmax.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable vector map join hash tables to
-      use min / max filtering for integer join queries using MapJoin.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable use of native fast vector map join hash tables in
-      queries using MapJoin.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.mapjoin.hybridgrace.hashtable</name>
-    <value>false</value>
-    <description>Whether to use hybrid grace hash join as the join method for mapjoin. Tez only.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.bucket.pruning</name>
-    <value>true</value>
-    <description>
-      When pruning is enabled, filters on bucket columns will be processed by
-      filtering the splits against a bitset of included buckets. This needs predicates
-      produced by hive.optimize.ppd and hive.optimize.index.filters.
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.dynamic.partition.hashjoin</name>
-    <value>true</value>
-    <description>
-      Whether to enable dynamically partitioned hash join optimization.
-      This setting is also dependent on enabling hive.auto.convert.join
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.zk.sm.connectionString</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper connection string for ZooKeeper SecretManager.</description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>clientPort</name>
-            <type>zoo.cfg</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.llap.io.memory.mode</name>
-    <value></value>
-    <description>
-      LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a
-      custom off-heap allocator, 'allocator' uses the custom allocator without the caches,
-      'none' doesn't use either (this mode may result in significant performance degradation)
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.event.listeners</name>
-    <value></value>
-    <description>
-      Listeners for metastore events
-    </description>
-    <filename>hive-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
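
A reviewer-side aside on the block above: each <property> element bundles the value with Ambari metadata (filename, value-attributes, depends-on). Below is a minimal, hedged sketch of listing the name/value pairs and their depends-on entries from such a file with the Python standard library; the file path is only an assumed example, not anything this patch defines.

# Sketch: dump name/value pairs and depends-on entries from an Ambari
# configuration XML such as the hive-interactive-site.xml shown above.
# The path below is an assumed example path.
import xml.etree.ElementTree as ET

def dump_properties(path):
    root = ET.parse(path).getroot()                     # <configuration>
    for prop in root.findall('property'):
        name = prop.findtext('name')
        value = prop.findtext('value') or ''
        deps = [d.findtext('name') for d in prop.findall('depends-on/property')]
        print('%s = %s' % (name, value))
        if deps:
            print('    depends on: %s' % ', '.join(deps))

if __name__ == '__main__':
    dump_properties('hive-interactive-site.xml')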

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
deleted file mode 100755
index 798063b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
+++ /dev/null
@@ -1,90 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-status = INFO
-name = HiveLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.hive.log.level = INFO
-property.hive.root.logger = DRFA
-property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
-property.hive.log.file = hive.log
-
-# list of all appenders
-appenders = console, DRFA
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-
-# daily rolling file appender
-appender.DRFA.type = RollingFile
-appender.DRFA.name = DRFA
-appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
-# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI sessions
-appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}.gz
-appender.DRFA.layout.type = PatternLayout
-appender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-appender.DRFA.policies.type = Policies
-appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
-appender.DRFA.policies.time.interval = 1
-appender.DRFA.policies.time.modulate = true
-appender.DRFA.strategy.type = DefaultRolloverStrategy
-appender.DRFA.strategy.max = 30
-
-# list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
-
-logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
-logger.NIOServerCnxn.level = WARN
-
-logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
-logger.ClientCnxnSocketNIO.level = WARN
-
-logger.DataNucleus.name = DataNucleus
-logger.DataNucleus.level = ERROR
-
-logger.Datastore.name = Datastore
-logger.Datastore.level = ERROR
-
-logger.JPOX.name = JPOX
-logger.JPOX.level = ERROR
-
-# root logger
-rootLogger.level = ${sys:hive.log.level}
-rootLogger.appenderRefs = root
-rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
-  </value>
-    <description>Custom hive-log4j2.properties</description>
-    <display-name>hive-log4j2 template</display-name>
-    <filename>hive-log4j2.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
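
Aside: the template above drives the daily-rolling DRFA appender from ${sys:...} placeholders, with hive.log.dir defaulting to the JVM temp dir plus the user name. A small illustrative sketch of how those defaults resolve on a host follows; Python's tempfile.gettempdir() merely stands in for java.io.tmpdir here.

# Sketch: preview where the DRFA appender would write by default, mirroring
# property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name} and
# property.hive.log.file = hive.log. Illustrative only; tempfile.gettempdir()
# stands in for the JVM's java.io.tmpdir.
import getpass
import os
import tempfile

hive_log_dir = os.path.join(tempfile.gettempdir(), getpass.getuser())
log_path = os.path.join(hive_log_dir, 'hive.log')
print('DRFA fileName   : %s' % log_path)
print('DRFA filePattern: %s.<yyyy-MM-dd>.gz' % log_path)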

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
deleted file mode 100755
index 6954e56..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<configuration><property require-input="false">
-    <name>hive.metastore.metrics.enabled</name>
-    <value>true</value>
-    <filename>hiveserver2-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
-    <filename>hiveserver2-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.hadoop2.component</name>
-    <value>hiveserver2</value>
-    <filename>hiveserver2-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hiveserver2Interactive-report.json</value>
-    <filename>hiveserver2-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.async.log.enabled</name>
-    <value>false</value>
-    <description>Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give a significant performance improvement, as logging is handled in a separate thread that uses the LMAX disruptor queue for buffering log messages. Refer to https://logging.apache.org/log4j/2.x/manual/async.html for benefits and drawbacks.</description>
-    <filename>hiveserver2-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
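
Aside: with JSON_FILE among the reporters, HiveServer2 Interactive periodically rewrites the report at the hive.service.metrics.file.location shown above. A hedged sketch of peeking at that file follows; it assumes the service is running, has written the report at least once, and that the report uses the usual Codahale-style grouping by metric kind.

# Sketch: summarize the JSON_FILE metrics report configured above. Assumes the
# file exists and is grouped by metric kind (gauges, counters, timers, ...).
import json

REPORT = '/var/log/hive/hiveserver2Interactive-report.json'

with open(REPORT) as fh:
    report = json.load(fh)

for section, metrics in sorted(report.items()):
    if isinstance(metrics, dict):
        print('%-10s %d metrics' % (section, len(metrics)))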

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
deleted file mode 100755
index b7f6523..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
+++ /dev/null
@@ -1,91 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-status = WARN
-name = LlapCliLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.hive.log.level = INFO
-property.hive.root.logger = console
-property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
-property.hive.log.file = llap-cli.log
-
-# list of all appenders
-appenders = console, DRFA
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %p %c{2}: %m%n
-
-# daily rolling file appender
-appender.DRFA.type = RollingRandomAccessFile
-appender.DRFA.name = DRFA
-appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
-# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI sessions
-appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
-appender.DRFA.layout.type = PatternLayout
-appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
-appender.DRFA.policies.type = Policies
-appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
-appender.DRFA.policies.time.interval = 1
-appender.DRFA.policies.time.modulate = true
-appender.DRFA.strategy.type = DefaultRolloverStrategy
-appender.DRFA.strategy.max = 30
-
-# list of all loggers
-loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf
-
-logger.ZooKeeper.name = org.apache.zookeeper
-logger.ZooKeeper.level = WARN
-
-logger.DataNucleus.name = DataNucleus
-logger.DataNucleus.level = ERROR
-
-logger.Datastore.name = Datastore
-logger.Datastore.level = ERROR
-
-logger.JPOX.name = JPOX
-logger.JPOX.level = ERROR
-
-logger.HadoopConf.name = org.apache.hadoop.conf.Configuration
-logger.HadoopConf.level = ERROR
-
-# root logger
-rootLogger.level = ${sys:hive.log.level}
-rootLogger.appenderRefs = root, DRFA
-rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
-rootLogger.appenderRef.DRFA.ref = DRFA
-  </value>
-    <description>Custom llap-cli-log4j2.properties</description>
-    <display-name>llap-cli-log4j2 template</display-name>
-    <filename>llap-cli-log4j2.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
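
Aside: the llap-cli template differs from the hive-log4j2 one mainly in its property.* defaults (console root logger, llap-cli.log, RollingRandomAccessFile). A small sketch of pulling those defaults out of a rendered properties file follows; the file name used is only an assumed example.

# Sketch: print the "property.*" defaults from a rendered log4j2 properties
# file, e.g. to confirm the level, root logger, log dir and log file that the
# llap-cli template above would use. The path is an assumed example.
def log4j2_defaults(path):
    defaults = {}
    with open(path) as fh:
        for raw in fh:
            line = raw.strip()
            if line.startswith('property.') and '=' in line:
                key, value = line.split('=', 1)
                defaults[key.strip()[len('property.'):]] = value.strip()
    return defaults

if __name__ == '__main__':
    for key, value in sorted(log4j2_defaults('llap-cli-log4j2.properties').items()):
        print('%s = %s' % (key, value))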

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
deleted file mode 100755
index 30c31be..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
+++ /dev/null
@@ -1,158 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# This is the log4j2 properties file used by llap daemons. There are several loggers defined, which
-# can be selected while configuring LLAP.
-# Based on the one selected, UI links etc. need to be adjusted in the system.
-# Note: Some names and logic are common to this file and the llap LogHelpers. Make sure to change those
-# as well, if changing this file.
-
-status = INFO
-name = LlapDaemonLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.llap.daemon.log.level = INFO
-property.llap.daemon.root.logger = console
-property.llap.daemon.log.dir = .
-property.llap.daemon.log.file = llapdaemon.log
-property.llap.daemon.historylog.file = llapdaemon_history.log
-property.llap.daemon.log.maxfilesize = 256MB
-property.llap.daemon.log.maxbackupindex = 240
-
-# list of all appenders
-appenders = console, RFA, HISTORYAPPENDER, query-routing
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n
-
-# rolling file appender
-appender.RFA.type = RollingRandomAccessFile
-appender.RFA.name = RFA
-appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
-appender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%d{yyyy-MM-dd-HH}_%i.done
-appender.RFA.layout.type = PatternLayout
-appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t (%X{fragmentId})] %c: %m%n
-appender.RFA.policies.type = Policies
-appender.RFA.policies.time.type = TimeBasedTriggeringPolicy
-appender.RFA.policies.time.interval = 1
-appender.RFA.policies.time.modulate = true
-appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
-appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
-appender.RFA.strategy.type = DefaultRolloverStrategy
-appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
-
-# history file appender
-appender.HISTORYAPPENDER.type = RollingRandomAccessFile
-appender.HISTORYAPPENDER.name = HISTORYAPPENDER
-appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
-appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd}_%i.done
-appender.HISTORYAPPENDER.layout.type = PatternLayout
-appender.HISTORYAPPENDER.layout.pattern = %m%n
-appender.HISTORYAPPENDER.policies.type = Policies
-appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
-appender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
-appender.HISTORYAPPENDER.policies.time.type = TimeBasedTriggeringPolicy
-appender.HISTORYAPPENDER.policies.time.interval = 1
-appender.HISTORYAPPENDER.policies.time.modulate = true
-appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
-appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
-
-# queryId based routing file appender
-appender.query-routing.type = Routing
-appender.query-routing.name = query-routing
-appender.query-routing.routes.type = Routes
-appender.query-routing.routes.pattern = $${ctx:queryId}
-# Purge policy for the query-based Routing Appender
-appender.query-routing.purgePolicy.type = LlapRoutingAppenderPurgePolicy
-# Note: Do not change this name without changing the corresponding entry in LlapConstants
-appender.query-routing.purgePolicy.name = llapLogPurgerQueryRouting
-# default route
-appender.query-routing.routes.route-default.type = Route
-appender.query-routing.routes.route-default.key = $${ctx:queryId}
-appender.query-routing.routes.route-default.ref = RFA
-# queryId based route
-appender.query-routing.routes.route-mdc.type = Route
-appender.query-routing.routes.route-mdc.file-mdc.type = LlapWrappedAppender
-appender.query-routing.routes.route-mdc.file-mdc.name = IrrelevantName-query-routing
-appender.query-routing.routes.route-mdc.file-mdc.app.type = RandomAccessFile
-appender.query-routing.routes.route-mdc.file-mdc.app.name = file-mdc
-appender.query-routing.routes.route-mdc.file-mdc.app.fileName = ${sys:llap.daemon.log.dir}/${ctx:queryId}-${ctx:dagId}.log
-appender.query-routing.routes.route-mdc.file-mdc.app.layout.type = PatternLayout
-appender.query-routing.routes.route-mdc.file-mdc.app.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n
-
-# list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking
-
-logger.LlapIoImpl.name = LlapIoImpl
-logger.LlapIoImpl.level = INFO
-
-logger.LlapIoOrc.name = LlapIoOrc
-logger.LlapIoOrc.level = WARN
-
-logger.LlapIoCache.name = LlapIoCache
-logger.LlapIoCache.level = WARN
-
-logger.LlapIoLocking.name = LlapIoLocking
-logger.LlapIoLocking.level = WARN
-
-logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
-logger.NIOServerCnxn.level = WARN
-
-logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
-logger.ClientCnxnSocketNIO.level = WARN
-
-logger.DataNucleus.name = DataNucleus
-logger.DataNucleus.level = ERROR
-
-logger.Datastore.name = Datastore
-logger.Datastore.level = ERROR
-
-logger.JPOX.name = JPOX
-logger.JPOX.level = ERROR
-
-logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
-logger.HistoryLogger.level = INFO
-logger.HistoryLogger.additivity = false
-logger.HistoryLogger.appenderRefs = HistoryAppender
-logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
-
-# root logger
-rootLogger.level = ${sys:llap.daemon.log.level}
-rootLogger.appenderRefs = root
-rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
-  </value>
-    <description>Custom llap-daemon-log4j2.properties</description>
-    <display-name>llap-daemon-log4j template</display-name>
-    <filename>llap-daemon-log4j.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
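
Aside: the daemon template declares its appenders and loggers up front ("appenders = ...", "loggers = ..."), so a quick cross-check that every appender.<name>. / logger.<name>. key refers to a declared name is cheap and catches slips such as a level being keyed under a differently-cased logger name. A hedged sketch, with an assumed example path:

# Sketch: flag "appender.<name>." / "logger.<name>." keys whose <name> is not
# listed in the "appenders =" / "loggers =" declarations of a log4j2
# properties file. The path is an assumed example.
def check(path):
    keys, declared = [], {'appender': set(), 'logger': set()}
    with open(path) as fh:
        for raw in fh:
            line = raw.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = [part.strip() for part in line.split('=', 1)]
            keys.append(key)
            if key in ('appenders', 'loggers'):
                declared[key[:-1]].update(v.strip() for v in value.split(','))
    for key in keys:
        parts = key.split('.')
        if len(parts) > 1 and parts[0] in declared and parts[1] not in declared[parts[0]]:
            print('%s refers to undeclared %s "%s"' % (key, parts[0], parts[1]))

if __name__ == '__main__':
    check('llap-daemon-log4j2.properties')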

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
deleted file mode 100755
index 9e74aa0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
+++ /dev/null
@@ -1,136 +0,0 @@
-<configuration><property require-input="false">
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <description>Is Audit to HDFS enabled?</description>
-    <display-name>Audit to HDFS</display-name>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>xasecure.audit.destination.hdfs</name>
-            <type>ranger-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>xasecure.audit.destination.hdfs.dir</name>
-            <type>ranger-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hive/audit/hdfs/spool</value>
-    <description>/var/log/hive/audit/hdfs/spool</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <description>Is Solr audit enabled?</description>
-    <display-name>Audit to SOLR</display-name>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>xasecure.audit.destination.solr</name>
-            <type>ranger-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value></value>
-    <description>Solr URL</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>ranger.audit.solr.urls</name>
-            <type>ranger-admin-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>ranger.audit.solr.zookeepers</name>
-            <type>ranger-admin-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hive/audit/solr/spool</value>
-    <description>/var/log/hive/audit/solr/spool</description>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <description>Enable Summary audit?</description>
-    <display-name>Audit provider summary enabled</display-name>
-    <filename>ranger-hive-audit.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
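
Aside: the defaults above ship a placeholder NameNode host in the HDFS audit directory and an empty Solr URL list, so a rendered ranger-hive-audit configuration is worth a quick sanity check before enabling either destination. A minimal sketch, using an inline dict that merely stands in for the values Ambari would render:

# Sketch: sanity-check rendered ranger-hive-audit settings. The dict mirrors
# the defaults above and stands in for the values Ambari would render.
audit = {
    'xasecure.audit.destination.hdfs': 'true',
    'xasecure.audit.destination.hdfs.dir': 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit',
    'xasecure.audit.destination.solr': 'false',
    'xasecure.audit.destination.solr.urls': '',
}

if (audit['xasecure.audit.destination.hdfs'] == 'true'
        and 'NAMENODE_HOSTNAME' in audit['xasecure.audit.destination.hdfs.dir']):
    print('HDFS audit is enabled but the destination dir still uses the placeholder host')
if (audit['xasecure.audit.destination.solr'] == 'true'
        and not audit['xasecure.audit.destination.solr.urls']):
    print('Solr audit is enabled but no Solr URL is configured')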


[47/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
deleted file mode 100755
index bc6486b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
+++ /dev/null
@@ -1,1406 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-    "SD_ID" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000),
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(128)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
-
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
---
--- PostgreSQL database dump complete
---
-
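
The dump above ends by seeding the "VERSION" table with a single row, which the Hive metastore reads (when schema verification is enabled) to confirm that the on-disk schema matches the Hive release. A minimal SQL sketch of that check, assuming the schema has been loaded into the metastore database as the hiveuser role:

    -- Verify the recorded metastore schema version.
    -- The dump above inserts exactly one row into "VERSION".
    SELECT "VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT"
      FROM "VERSION";
    -- Expected: 1 | 0.12.0 | Hive release version 0.12.0

Hive's schematool performs an equivalent lookup when validating or upgrading a metastore schema, so a missing or duplicated row here is a common cause of "Version information not found in metastore" failures.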

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
deleted file mode 100755
index 89ce15d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
+++ /dev/null
@@ -1,889 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNCS
---
-CREATE TABLE IF NOT EXISTS `FUNCS` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `CREATE_TIME` INT(11) NOT NULL,
-  `DB_ID` BIGINT(20),
-  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `FUNC_TYPE` INT(11) NOT NULL,
-  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
-  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
-  PRIMARY KEY (`FUNC_ID`),
-  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
-  KEY `FUNCS_N49` (`DB_ID`),
-  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table FUNC_RU
---
-CREATE TABLE IF NOT EXISTS `FUNC_RU` (
-  `FUNC_ID` BIGINT(20) NOT NULL,
-  `RESOURCE_TYPE` INT(11) NOT NULL,
-  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
-  `INTEGER_IDX` INT(11) NOT NULL,
-  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
-  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-
--- -----------------------------------------------------------------------------------------------------------------------------------------------
--- Transaction and Lock Tables
--- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
--- -----------------------------------------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint,
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767),
-  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767),
-  HL_LOCK_STATE char(1) not null,
-  HL_LOCK_TYPE char(1) not null,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
-  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31
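Note on the hunk above: the MySQL dump ends by seeding the VERSION table, which Hive uses for schema verification, and it carries the transaction and lock tables that the comment says must be re-added by hand whenever the file is regenerated. A rough sketch of how such a dump is typically applied and checked with the stock mysql client follows; the database name, user, and schema file name are assumptions for illustration, not values taken from this patch.

  #!/usr/bin/env bash
  # Sketch only: load a Hive metastore schema dump into MySQL and read back the version row.
  # DB, DB_USER and SCHEMA_FILE are assumed values, not part of this commit.
  set -euo pipefail

  DB=metastore
  DB_USER=hive
  SCHEMA_FILE=hive-schema-0.13.0.mysql.sql

  # Create the database if needed, then load the DDL (the client prompts for a password).
  mysql -u "$DB_USER" -p -e "CREATE DATABASE IF NOT EXISTS ${DB};"
  mysql -u "$DB_USER" -p "$DB" < "$SCHEMA_FILE"

  # The version row is the last thing the dump writes; confirm it is present.
  mysql -u "$DB_USER" -p "$DB" -e "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;"

  # Hive also ships a schematool that performs the same initialization end to end:
  #   schematool -dbType mysql -initSchema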


[43/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
deleted file mode 100755
index bc6486b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
+++ /dev/null
@@ -1,1406 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-    "SD_ID" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000),
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(128)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
-
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
---
--- PostgreSQL database dump complete
---
-
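Note on the hunk above: the PostgreSQL variant quotes every identifier, so the upper-case object names survive case folding, declares its foreign keys DEFERRABLE, and finishes by recording schema version 0.12.0. A minimal sketch of loading and verifying it with psql, where the host, database, and role names are assumptions rather than values from this patch:

  #!/usr/bin/env bash
  # Sketch only: apply the PostgreSQL metastore schema and read back the version row.
  # PGHOST, PGDATABASE and PGUSER are assumed values, not part of this commit.
  set -euo pipefail

  export PGHOST=localhost
  export PGDATABASE=hive
  export PGUSER=hiveuser

  # Load the dump; quoted identifiers keep the upper-case object names intact.
  psql -f hive-schema-0.12.0.postgres.sql

  # Read back the row inserted at the end of the dump.
  psql -c 'SELECT "SCHEMA_VERSION", "VERSION_COMMENT" FROM "VERSION";'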

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100755
index 862e9b2..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-userhost=$4
-
-# The restart (not start) is required to pick up mysql configuration changes made by sed
-# during install, in case mysql is already started. The changes are required by Hive later on.
-/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice restart
-
-# MySQL 5.7 installed in non-interactive way uses a socket authentication plugin.
-# "mysql -u root" should be executed from root user
-echo "Adding user $mysqldbuser@% and removing users with empty name"
-/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';"
-/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';"
-/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "DELETE FROM mysql.user WHERE user='';"
-/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "flush privileges;"
-/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice stop
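Note on the hunk above: addMysqlUser.sh restarts the MySQL service so earlier configuration edits take effect, creates the metastore user for any host, removes anonymous users, flushes privileges, and stops the service again. An illustrative invocation, with placeholder values rather than anything taken from this patch:

  # Arguments: service name, db user, db password, user host (all placeholders here).
  bash addMysqlUser.sh mysqld hive 'hive-secret' '%'

  # The resulting grant can then be inspected from the MySQL root account:
  #   mysql -u root -e "SHOW GRANTS FOR 'hive'@'%';"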

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100755
index 39e63a6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-export purge_cmd=""
-if [ "$3" == "true" ]; then
-	export purge_cmd="purge"
-fi
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename} ${purge_cmd}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename} ${purge_cmd}"
-;;
-
-esac
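Note on the hunk above: hcatSmoke.sh takes a table name, an action (prepare or cleanup), and a purge flag; prepare lists tables, drops any stale copy, and creates an RCFile-backed test table through hcat, while cleanup drops it again. An illustrative run with a placeholder table name:

  # prepare: list tables, drop any stale copy, create the RCFile test table
  bash hcatSmoke.sh hcatsmoke_check prepare true

  # cleanup: drop the test table again (with PURGE because the flag is "true")
  bash hcatSmoke.sh hcatsmoke_check cleanup true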

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100755
index f9f2020..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
deleted file mode 100755
index 10d6a1c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
+++ /dev/null
@@ -1,58 +0,0 @@
-@echo off
-rem Licensed to the Apache Software Foundation (ASF) under one or more
-rem contributor license agreements.  See the NOTICE file distributed with
-rem this work for additional information regarding copyright ownership.
-rem The ASF licenses this file to You under the Apache License, Version 2.0
-rem (the "License"); you may not use this file except in compliance with
-rem the License.  You may obtain a copy of the License at
-rem
-rem     http://www.apache.org/licenses/LICENSE-2.0
-rem
-rem Unless required by applicable law or agreed to in writing, software
-rem distributed under the License is distributed on an "AS IS" BASIS,
-rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-rem See the License for the specific language governing permissions and
-rem limitations under the License.
-
-if not defined HADOOP_HOME (
-  set EXITCODE=5
-  goto :errorexit
-)
-if not defined HIVE_HOME (
-  set EXITCODE=6
-  goto :errorexit
-)
-if not defined TEZ_HOME (
-  set EXITCODE=7
-  goto :errorexit
-)
-
-rem Delayed expansion is needed so ERRORLEVEL/EXITCODE are read at run time
-rem inside the parenthesized block below, not once at parse time.
-setlocal EnableDelayedExpansion
-set EXITCODE=0
-
-if not exist %HIVE_HOME%\conf\hive-tez-configured (
-  %HADOOP_HOME%\bin\hadoop.cmd fs -mkdir /apps/tez
-  set EXITCODE=!ERRORLEVEL!
-  if !EXITCODE! neq 0 goto :errorexit
-
-  %HADOOP_HOME%\bin\hadoop.cmd fs -chmod -R 755 /apps/tez
-  set EXITCODE=!ERRORLEVEL!
-  if !EXITCODE! neq 0 goto :errorexit
-
-  %HADOOP_HOME%\bin\hadoop.cmd fs -chown -R hadoop:users /apps/tez
-  set EXITCODE=!ERRORLEVEL!
-  if !EXITCODE! neq 0 goto :errorexit
-
-  %HADOOP_HOME%\bin\hadoop.cmd fs -put %TEZ_HOME%\* /apps/tez
-  set EXITCODE=!ERRORLEVEL!
-  if !EXITCODE! neq 0 goto :errorexit
-
-  %HADOOP_HOME%\bin\hadoop.cmd fs -rm -r -skipTrash /apps/tez/conf
-  set EXITCODE=!ERRORLEVEL!
-  if !EXITCODE! neq 0 goto :errorexit
-
-  echo done > %HIVE_HOME%\conf\hive-tez-configured
-)
-goto :eof
-
-:errorexit
-exit /B %EXITCODE%

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100755
index 99a3865..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100755
index 77d7b3e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100755
index 2e90ac0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
deleted file mode 100755
index 7b6d331..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-userhost=$3
-myhostname=$(hostname -f)
-sudo_prefix = "/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-
-$sudo_prefix service $mysqldservice start
-echo "Removing user $mysqldbuser@$userhost"
-/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
-/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
-$sudo_prefix service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
deleted file mode 100755
index 86541f0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_BIN=${HIVE_BIN:-"hive"}
-
-HIVE_CONF_DIR=$4 $HIVE_BIN --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
-echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
deleted file mode 100755
index 0ab94fe..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export templeton_port=$3
-export ttonTestScript=$4
-export smoke_user_keytab=$5
-export security_enabled=$6
-export kinit_path_local=$7
-export smokeuser_principal=$8
-export tmp_dir=$9
-export ttonurl="http://${ttonhost}:${templeton_port}/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smokeuser_principal}; "
-else
-  kinitcmd=""
-fi
-
-export no_proxy=$ttonhost
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status 2>&1"
-retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-# On HTTP 500, retry with the user.name query parameter appended, which 2.3+ requires
-if [[ "$httpExitCode" == "500" ]] ; then
-  cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status?user.name=$smoke_test_user 2>&1"
-  retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-  httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-fi
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
deleted file mode 100755
index 5561e10..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
deleted file mode 100755
index 5e2c709..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
-from ambari_commons.constants import SERVICE
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hcat():
-  import params
-
-  XmlConfig("hive-site.xml",
-            conf_dir = params.hive_conf_dir,
-            configurations = params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            configuration_attributes=params.config['configuration_attributes']['hive-site']
-  )
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hcat():
-  import params
-
-  Directory(params.hive_conf_dir,
-            create_parents = True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-
-  Directory(params.hcat_conf_dir,
-            create_parents = True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            create_parents = True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_client_conf_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  File(format("{hcat_conf_dir}/hcat-env.sh"),
-       owner=params.hcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hcat_env_sh_template)
-  )
-
-  # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
-    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
-    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100755
index b37698e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from hcat import hcat
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.script.script import Script
-
-
-class HCatClient(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hcat()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HCatClientWindows(HCatClient):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HCatClientDefault(HCatClient):
-  def get_component_name(self):
-    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
-    # update after daemons, this ensures that the hcat directories are correct on hosts
-    # which do not include the WebHCat daemon
-    return "hive-webhcat"
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    """
-    Execute <stack-selector-tool> before reconfiguring this client to the new stack version.
-
-    :param env:
-    :param upgrade_type:
-    :return:
-    """
-    Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart")
-
-    import params
-    env.set_params(params)
-
-    # this function should not execute if the stack version does not support rolling upgrade
-    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
-      return
-
-    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
-    # update after daemons, this ensures that the hcat directories are correct on hosts
-    # which do not include the WebHCat daemon
-    stack_select.select("hive-webhcat", params.version)
-
-
-if __name__ == "__main__":
-  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100755
index 07b4095..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hcat_service_check():
-  import params
-  smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
-  service = "HCatalog"
-  Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hcat_service_check():
-    import params
-    unique = get_unique_id_and_date()
-    output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
-    else:
-      kinit_cmd = ""
-
-    File(format("{tmp_dir}/hcatSmoke.sh"),
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare {purge_tables}")
-
-    exec_path = params.execute_path
-    if params.version and params.stack_root:
-      upgrade_hive_bin = format("{stack_root}/{version}/hive/bin")
-      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
-            logoutput=True)
-
-    if params.security_enabled:
-      Execute (format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-               user = params.hdfs_user,
-      )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  bin_dir=params.execute_path
-    )
-
-    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup {purge_tables}")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
-            logoutput=True)


[39/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
deleted file mode 100755
index f44e3b2..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
+++ /dev/null
@@ -1,2596 +0,0 @@
-{
-  "HISTORYSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/CallQueueLength": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemMaxM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillis": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsNew": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsRunnable": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsBlocked": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTimedWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTerminated": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogFatal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogError": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogWarn": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogInfo": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryInit": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryInit": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/MBeanServerId": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/ElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/PercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
-              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/MemoryPoolNames": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Name": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Valid": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/ObjectName": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Logging/LoggerNames": {
-              "metric": "java.util.logging:type=Logging.LoggerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemorySupported": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/DaemonThreadCount": {
-              "metric": "java.lang:type=Threading.DaemonThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/PeakThreadCount": {
-              "metric": "java.lang:type=Threading.PeakThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ObjectMonitorUsageSupported": {
-              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/SynchronizerUsageSupported": {
-              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringSupported": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeEnabled": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadUserTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCount": {
-              "metric": "java.lang:type=Threading.ThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/TotalStartedThreadCount": {
-              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringEnabled": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/AllThreadIds": {
-              "metric": "java.lang:type=Threading.AllThreadIds",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/LoadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.LoadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/UnloadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/TotalLoadedClassCount": {
-              "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ClassLoading/Verbose": {
-              "metric": "java.lang:type=ClassLoading.Verbose",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/BootClassPath": {
-              "metric": "java.lang:type=Runtime.BootClassPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/LibraryPath": {
-              "metric": "java.lang:type=Runtime.LibraryPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmName": {
-              "metric": "java.lang:type=Runtime.VmName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmVendor": {
-              "metric": "java.lang:type=Runtime.VmVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/VmVersion": {
-              "metric": "java.lang:type=Runtime.VmVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/BootClassPathSupported": {
-              "metric": "java.lang:type=Runtime.BootClassPathSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/InputArguments": {
-              "metric": "java.lang:type=Runtime.InputArguments",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/ManagementSpecVersion": {
-              "metric": "java.lang:type=Runtime.ManagementSpecVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecName": {
-              "metric": "java.lang:type=Runtime.SpecName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecVendor": {
-              "metric": "java.lang:type=Runtime.SpecVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SpecVersion": {
-              "metric": "java.lang:type=Runtime.SpecVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/SystemProperties": {
-              "metric": "java.lang:type=Runtime.SystemProperties",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/Uptime": {
-              "metric": "java.lang:type=Runtime.Uptime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/Name": {
-              "metric": "java.lang:type=Runtime.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Runtime/ClassPath": {
-              "metric": "java.lang:type=Runtime.ClassPath",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/MaxFileDescriptorCount": {
-              "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/OpenFileDescriptorCount": {
-              "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/CommittedVirtualMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/FreePhysicalMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/FreeSwapSpaceSize": {
-              "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/ProcessCpuLoad": {
-              "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/ProcessCpuTime": {
-              "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/SystemCpuLoad": {
-              "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/TotalPhysicalMemorySize": {
-              "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/TotalSwapSpaceSize": {
-              "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/AvailableProcessors": {
-              "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Version": {
-              "metric": "java.lang:type=OperatingSystem.Version",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Arch": {
-              "metric": "java.lang:type=OperatingSystem.Arch",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/SystemLoadAverage": {
-              "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/OperatingSystem/Name": {
-              "metric": "java.lang:type=OperatingSystem.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/PermGen/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/Count": {
-              "metric": "java.nio:type=BufferPool,name=mapped.Count",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/MemoryUsed": {
-              "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/TotalCapacity": {
-              "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/mapred/Name": {
-              "metric": "java.nio:type=BufferPool,name=mapped.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/Count": {
-              "metric": "java.nio:type=BufferPool,name=direct.Count",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/MemoryUsed": {
-              "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/TotalCapacity": {
-              "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/BufferPool/direct/Name": {
-              "metric": "java.nio:type=BufferPool,name=direct.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/Name": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/MarkSweepCompact/Valid": {
-              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/LastGcInfo": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/CollectionCount": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/CollectionTime": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/MemoryPoolNames": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/Name": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/GarbageCollector/Copy/Valid": {
-              "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/CodeCache/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/EdenSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/TenuredGen/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.metrics.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.ugi.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.ugi.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.ugi.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.ugi.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.metrics.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTimeAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationFailures": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/CallQueueLength": {
-              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemNonHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapUsedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemHeapCommittedM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/MemMaxM": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisCopy": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCountMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/GcTimeMillis": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsNew": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsRunnable": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsBlocked": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTimedWaiting": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/ThreadsTerminated": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogFatal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogError": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogWarn": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/LogInfo": {
-              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/HeapMemoryInit": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryCommitted": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Memory/NonHeapMemoryInit": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/MBeanServerId": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/SpecificationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationName": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVersion": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MBeanServerDelegate/ImplementationVendor": {
-              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/ElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/PercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImageTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/LoadingEditsPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeCount": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeElapsedTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModeTotal": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/StartupProgress/SafeModePercentComplete": {
-              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
-              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/MemoryPoolNames": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Name": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/Valid": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryManager/ObjectName": {
-              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Logging/LoggerNames": {
-              "metric": "java.util.logging:type=Logging.LoggerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginSuccessAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureNumOps": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/UgiMetrics/LoginFailureAvgTime": {
-              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Usage": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Name": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Type": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/MemoryPool/SurvivorSpace/Valid": {
-              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadAllocatedMemorySupported": {
-              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/DaemonThreadCount": {
-              "metric": "java.lang:type=Threading.DaemonThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/PeakThreadCount": {
-              "metric": "java.lang:type=Threading.PeakThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ObjectMonitorUsageSupported": {
-              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/SynchronizerUsageSupported": {
-              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringSupported": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeEnabled": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadCpuTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/CurrentThreadUserTime": {
-              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCount": {
-              "metric": "java.lang:type=Threading.ThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/TotalStartedThreadCount": {
-              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadCpuTimeSupported": {
-              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/ThreadContentionMonitoringEnabled": {
-              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/Threading/AllThreadIds": {
-              "metric": "java.lang:type=Threading.AllThreadIds",
-              "point

<TRUNCATED>
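
The metric descriptors in the truncated block above all follow one visible pattern: everything up to the last dot looks like a JMX ObjectName (for example java.lang:type=Threading) and the final segment is the MBean attribute to read. A minimal sketch of that split, assuming the last-dot convention holds for every entry:

  # Split an Ambari-style JMX metric descriptor into ObjectName and attribute.
  # The "split on the last dot" rule is an assumption inferred from the entries above.
  def split_jmx_metric(descriptor):
      object_name, attribute = descriptor.rsplit(".", 1)
      return object_name, attribute

  print(split_jmx_metric("java.lang:type=Threading.ThreadCount"))
  # ('java.lang:type=Threading', 'ThreadCount')
  print(split_jmx_metric("java.lang:type=MemoryPool,name=Survivor Space.Name"))
  # ('java.lang:type=MemoryPool,name=Survivor Space', 'Name')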

[31/52] bigtop git commit: ODPI-187. ODPi reference implementation Ambari deployment fails for extra services

Posted by rv...@apache.org.
ODPI-187. ODPi reference implementation Ambari deployment fails for extra services

(cherry picked from commit fce4f49f98564c89741092cea4341c43b64b415e)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/bf841ada
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/bf841ada
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/bf841ada

Branch: refs/heads/master
Commit: bf841adacd6743007ccdf6b190a75db453805678
Parents: 7bd98d5
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Tue Nov 1 08:59:40 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:10 2017 -0700

----------------------------------------------------------------------
 .../ODPi/1.0/configuration/cluster-env.xml      |   2 +-
 .../ODPi/1.0/properties/stack_features.json     | 257 -------------------
 .../ambari/ODPi/1.0/properties/stack_tools.json |   4 +-
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |  12 +-
 4 files changed, 9 insertions(+), 266 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/bf841ada/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
index 81cb175..61274b6 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
@@ -196,7 +196,7 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
+    <value>/usr/odpi</value>
     <description>Stack root folder</description>
     <value-attributes>
       <read-only>true</read-only>
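
With stack_root switched above, everything the stack installs resolves under /usr/odpi instead of /usr/hdp. A minimal sketch of how a service script might pick the value up and build a component path; the dictionary shape and the "current/hadoop-client" layout are illustrative assumptions, not taken from this commit:

  import os

  # Illustrative config shape; only the stack_root value comes from the hunk above.
  config = {"configurations": {"cluster-env": {"stack_root": "/usr/odpi"}}}

  stack_root = config["configurations"]["cluster-env"]["stack_root"]
  hadoop_home = os.path.join(stack_root, "current", "hadoop-client")
  print(hadoop_home)  # /usr/odpi/current/hadoop-client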

http://git-wip-us.apache.org/repos/asf/bigtop/blob/bf841ada/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
index 8c838db..0c3e305 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
@@ -12,175 +12,16 @@
       "min_version": "2.2.1.0"
     },
     {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
       "name": "copy_tarball_to_hdfs",
       "description": "Copy tarball to HDFS support (AMBARI-12113)",
       "min_version": "2.2.0.0"
     },
     {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
       "name": "hive_metastore_upgrade_schema",
       "description": "Hive metastore upgrade schema support (AMBARI-11176)",
       "min_version": "2.3.0.0"
      },
     {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
       "name": "hive_webhcat_specific_configs",
       "description": "Hive webhcat specific configurations support (AMBARI-12364)",
       "min_version": "2.3.0.0"
@@ -202,107 +43,9 @@
       "min_version": "2.2.0.0"
     },
     {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
       "name": "hive_metastore_site_support",
       "description": "Hive Metastore site support",
       "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
     }
   ]
 }
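
The trimmed stack_features.json leaves only the features this stack definition actually honors; anything whose name is gone is simply treated as unsupported. A rough sketch of the version gating the file implies, assuming the feature list sits under a top-level "stack_features" key and that min_version is inclusive while max_version is exclusive:

  import json

  def _v(version):
      # '2.3.0.0' -> (2, 3, 0, 0) for a simple tuple comparison
      return tuple(int(part) for part in version.split("."))

  def stack_supports(feature, stack_version, features):
      for entry in features:
          if entry["name"] != feature:
              continue
          if _v(stack_version) < _v(entry["min_version"]):
              return False
          if "max_version" in entry and _v(stack_version) >= _v(entry["max_version"]):
              return False
          return True
      return False  # removed/unknown features fall back to "not supported"

  # File path and top-level key are assumptions for the example.
  with open("stack_features.json") as fp:
      features = json.load(fp)["stack_features"]
  print(stack_supports("copy_tarball_to_hdfs", "2.4.2.0", features))  # True
  print(stack_supports("ranger", "2.4.2.0", features))                # False after this commit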

http://git-wip-us.apache.org/repos/asf/bigtop/blob/bf841ada/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
index d1aab4b..c3df235 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
@@ -1,4 +1,4 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+  "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
   "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+}
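
Besides restoring the missing trailing newline, the hunk above swaps the stack selector from hdp-select to distro-select. The three-element list appears to be (tool name, binary path, package name); a short sketch of resolving it from the file, where the "versions" subcommand is an assumption borrowed from how hdp-select is normally driven:

  import json
  import subprocess

  with open("stack_tools.json") as fp:   # path is an assumption
      tools = json.load(fp)

  selector_name, selector_path, selector_package = tools["stack_selector"]
  print(selector_name, selector_path)    # distro-select /usr/bin/distro-select

  # Only meaningful on a node where the selector package is installed:
  # subprocess.check_call([selector_path, "versions"])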

http://git-wip-us.apache.org/repos/asf/bigtop/blob/bf841ada/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
index 35da7fd..3e79bc5 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
@@ -31,7 +31,7 @@
           <displayName>App Timeline Server</displayName>
           <category>MASTER</category>
           <cardinality>0-1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <commandScript>
             <script>scripts/application_timeline_server.py</script>
@@ -45,7 +45,7 @@
           <displayName>ResourceManager</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <commandScript>
             <script>scripts/resourcemanager.py</script>
@@ -93,7 +93,7 @@
           <displayName>NodeManager</displayName>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <decommissionAllowed>true</decommissionAllowed>
           <commandScript>
             <script>scripts/nodemanager.py</script>
@@ -117,7 +117,7 @@
           <displayName>YARN Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/yarn_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -210,7 +210,7 @@
           <displayName>History Server</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <auto-deploy>
             <enabled>true</enabled>
@@ -250,7 +250,7 @@
           <displayName>MapReduce2 Client</displayName>
           <category>CLIENT</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/mapreduce2_client.py</script>
             <scriptType>PYTHON</scriptType>
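
Every component touched above now carries versionAdvertised=false, so these YARN and MapReduce2 components no longer advertise a stack version back to Ambari. A quick way to confirm the flag across the whole file after the change (file path is an assumption):

  import xml.etree.ElementTree as ET

  tree = ET.parse("metainfo.xml")
  for component in tree.iter("component"):
      name = component.findtext("displayName")
      advertised = component.findtext("versionAdvertised")
      print(name, advertised)   # e.g. "ResourceManager false", "NodeManager false", ...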


[50/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100755
index 538334c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-
-hive.log.threshold=ALL
-hive.root.logger=INFO,FA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.query.id=hadoop
-hive.log.file=${hive.query.id}.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=${hive.log.threshold}
-
-#
-# File Appender
-#
-
-log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
-log4j.appender.FA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,FA
-log4j.category.Datastore=ERROR,FA
-log4j.category.Datastore.Schema=ERROR,FA
-log4j.category.JPOX.Datastore=ERROR,FA
-log4j.category.JPOX.Plugin=ERROR,FA
-log4j.category.JPOX.MetaData=ERROR,FA
-log4j.category.JPOX.Query=ERROR,FA
-log4j.category.JPOX.General=ERROR,FA
-log4j.category.JPOX.Enhancer=ERROR,FA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
-
-    </value>
-    <description>Custom hive-exec-log4j</description>
-    <display-name>hive-exec-log4j template</display-name>
-    <filename>hive-exec-log4j.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100755
index 3ecb24a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=INFO,DRFA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
-# for different CLI session.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
-
-    </value>
-    <description>Custom log4j.properties</description>
-    <display-name>hive-log4j template</display-name>
-    <filename>hive-log4j.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
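
Both deleted files above wrap a complete log4j properties template inside a single "content" property. For reference, a simplified stand-in for what the Hive service scripts do with such content properties is to pull the <value> text out and write it as the matching .properties file; file names here are assumptions for the sketch:

  import xml.etree.ElementTree as ET

  root = ET.parse("hive-log4j.xml").getroot()
  for prop in root.iter("property"):
      if prop.findtext("name") == "content":
          template = prop.findtext("value")
          with open("hive-log4j.properties", "w") as out:
              out.write(template)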


[38/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
deleted file mode 100755
index a66bb34..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
+++ /dev/null
@@ -1,3486 +0,0 @@
-{
-  "NODEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_buffered": {
-              "metric": "mem_buffered",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_count": {
-              "metric": "read_count",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_count": {
-              "metric": "write_count",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_bytes": {
-              "metric": "read_bytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_bytes": {
-              "metric": "write_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_time": {
-              "metric": "read_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/write_time": {
-              "metric": "write_time",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/read_bps":{
-              "metric":"read_bps",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/disk/write_bps":{
-              "metric":"write_bps",
-              "pointInTime":true,
-              "temporal":true,
-              "amsHostMetric":true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedVCores": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableVCores": {
-              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLocalDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLogDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationAvgTime": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationNumOps": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_buffered": {
-              "metric": "mem_buffered",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_count": {
-              "metric": "read_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_count": {
-              "metric": "write_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_bytes": {
-              "metric": "read_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_bytes": {
-              "metric": "write_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_time": {
-              "metric": "read_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_time": {
-              "metric": "write_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsFailed": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedContainers": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedGB": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableGB": {
-              "metric": "yarn.NodeManagerMetrics.AvailableGB",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AllocatedVCores": {
-              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/AvailableVCores": {
-              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLocalDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/BadLogDirs": {
-              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationAvgTime": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainerLaunchDurationNumOps": {
-              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersCompleted": {
-              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersFailed": {
-              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersIniting": {
-              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersKilled": {
-              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersLaunched": {
-              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ContainersRunning": {
-              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputsOK": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleConnections": {
-              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/mapred/ShuffleOutputBytes": {
-              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logError": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationSuccesses": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthenticationFailures": {
-              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/GoodLocalDirsDiskUtilizationPerc": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/GoodLogDirsDiskUtilizationPerc": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedGB": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedGB",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedVCores": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedVCores",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/BadLocalDirs": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLocalDirs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/BadLogDirs": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLogDirs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/ContainersFailed": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.ContainersFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/nodemanager/AllocatedContainers": {
-              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedContainers",
-              "pointInTime": true,
-              "temporal": false
-            }
-          }
-        }
-      }
-    ]
-  },
-  "RESOURCEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "yarn.ClusterMetrics.NumLostNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "rpc.rpc.SentBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "yarn.ClusterMetrics.NumActiveNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "rpc.rpc.ReceivedBytes",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveApplications": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveApplications",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveUsers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveUsers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersAllocated": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersAllocated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersReleased": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersReleased",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayAvgTime": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayNumOps": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceVCores": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceVCores",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_0": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_0",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_1440": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_1440",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_300": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_300",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_60": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).running_60",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "rpc.rpc.RpcQueueTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/NumOpenConnections": {
-              "metric": "rpc.rpc.NumOpenConnections",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisCopy": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/memMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/AllocateAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/callQueueLen": {
-              "metric": "rpc.rpc.CallQueueLength",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_num_ops": {
-              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/gcCountCopy": {
-              "metric": "jvm.JvmMetrics.GcCountCopy",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/SubmitApplicationNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpcAuthorizationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpc/RpcProcessingTime_avg_time": {
-              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
-              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/ThreadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
-              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
-              "metric": "metricssystem.MetricsSystem.NumActiveSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
-              "metric": "metricssystem.MetricsSystem.NumAllSinks",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/NumAllSources": {
-              "metric": "metricssystem.MetricsSystem.NumAllSources",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
-              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
-              "metric": "metricssystem.MetricsSystem.PublishNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
-              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
-              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
-              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthenticationFailures": {
-              "metric": "rpc.rpc.RpcAuthenticationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthenticationSuccesses": {
-              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcAuthorizationFailures": {
-              "metric": "rpc.rpc.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpc/rpc/RpcClientBackoff": {
-              "metric": "rpc.rpc.RpcClientBackoff",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/AllocateNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/NodeHeartbeatNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerAvgTime": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerNumOps": {
-              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
-              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
-              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMLaunchDelayAvgTime": {
-              "metric": "yarn.ClusterMetrics.AMLaunchDelayAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMLaunchDelayNumOps": {
-              "metric": "yarn.ClusterMetrics.AMLaunchDelayNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMRegisterDelayAvgTime": {
-              "metric": "yarn.ClusterMetrics.AMRegisterDelayAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/yarn/ClusterMetrics/AMRegisterDelayNumOps": {
-              "metric": "yarn.ClusterMetrics.AMRegisterDelayNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "default": {
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryMax": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/HeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryMax": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/NonHeapMemoryUsed": {
-              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/runtime/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/rpcAuthorizationFailures": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumLostNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/ugi/loginSuccess_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "ServiceComponentInfo/StartTime": {
-              "metric": "java.lang:type=Runtime.StartTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_avg_time": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/SentBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/ReceivedBytes": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/rpc/RpcQueueTime_num_ops": {
-              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-              "pointInTime": true,
-              "temporal": false
-            },
-            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-      

<TRUNCATED>
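
The metrics.json entries above (cut off by the archive's <TRUNCATED> marker) all follow one shape: an Ambari metric path maps to a sink-specific metric name plus two flags, pointInTime and temporal, which roughly indicate whether the value can be read as a current snapshot and whether it can be charted over a time range. A minimal sketch, assuming a local copy of the deleted file saved as metrics.json and that the first component block belongs to NODEMANAGER (as the yarn.NodeManagerMetrics names suggest), of how such a descriptor could be inspected:

    # Sketch only: list which metrics in the descriptor support time-range
    # ("temporal") queries, grouped by source type (ganglia vs. jmx).
    import json

    with open("metrics.json") as fh:          # assumed local copy of the file above
        descriptor = json.load(fh)

    for source in descriptor.get("NODEMANAGER", {}).get("Component", []):
        source_type = source.get("type", "unknown")
        for path, spec in source.get("metrics", {}).get("default", {}).items():
            if spec.get("temporal"):
                print("%s  %s -> %s" % (source_type, path, spec["metric"]))

Pointed at the RESOURCEMANAGER block in the same file, the same loop would surface the per-queue entries whose paths carry the $1.replaceAll(...) substitution shown above.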

[32/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json b/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

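For reference, the heatmap "values" entries above are plain arithmetic over the declared metrics; for example, the disk widget computes ${((disk_total-disk_free)/disk_total)*100}. A minimal sketch of that expression evaluated with made-up readings (the numbers are illustrative, not taken from the repo):

    # Evaluates the "Host Disk Space Used %" widget expression for sample readings.
    def disk_used_percent(disk_total, disk_free):
        return ((disk_total - disk_free) / disk_total) * 100.0

    print(disk_used_percent(disk_total=500.0, disk_free=125.0))   # 75.0
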
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/install_ambari.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/install_ambari.sh b/bigtop-packages/src/common/ambari/install_ambari.sh
index 1a66af5..9f8e449 100755
--- a/bigtop-packages/src/common/ambari/install_ambari.sh
+++ b/bigtop-packages/src/common/ambari/install_ambari.sh
@@ -88,7 +88,6 @@ SERVER_DIR=$BUILD_DIR/ambari-server/target/ambari-server-*-dist
 cp -ra $SERVER_DIR/* ${PREFIX}/
 cp -a  $SOURCE_DIR/ambari-common/src/main/unix/ambari-python-wrap ${PREFIX}/${VAR_LIB_DIR}
 rm -rf ${PREFIX}/var/lib/ambari-server/resources/stacks/HDP*
-cp -r  ${DISTRO_DIR}/ODPi ${PREFIX}/var/lib/ambari-server/resources/stacks/
 
 # End of Ambari Server
 
@@ -103,7 +102,6 @@ AGENT_DEST_DIR=/usr/lib/python2.6/site-packages/ambari_agent
 cp -ra $AGENT_BUILD_DIR/* ${PREFIX}/
 cp -a $SOURCE_DIR/ambari-common/src/main/unix/ambari-python-wrap ${PREFIX}/${VAR_LIB_DIR}
 rm -rf ${PREFIX}/var/lib/ambari-agent/cache/stacks/HDP*
-cp -r  ${DISTRO_DIR}/ODPi ${PREFIX}/var/lib/ambari-agent/cache/stacks/
 
 #Ambari Groovy Client 
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/deb/ambari/source/include-binaries
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/source/include-binaries b/bigtop-packages/src/deb/ambari/source/include-binaries
deleted file mode 100644
index d4ec7e3..0000000
--- a/bigtop-packages/src/deb/ambari/source/include-binaries
+++ /dev/null
@@ -1 +0,0 @@
-debian/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop.bom
----------------------------------------------------------------------
diff --git a/bigtop.bom b/bigtop.bom
index 28b54f2..26fbda9 100644
--- a/bigtop.bom
+++ b/bigtop.bom
@@ -468,5 +468,15 @@ bigtop {
       url     { site = "https://github.com/greenplum-db/gpdb/archive/"
                 archive = site }
     }
+    'ambari' {
+      name    = 'ambari'
+      relNotes = 'Apache Ambari'
+      version { base = '2.4.2'; pkg = base; release = 1 }
+      tarball { destination = "apache-$name-${version.base}-src.tar.gz"
+                source      = destination }
+      url     { download_path = "/$name/$name-${version.base}/"
+                site = "${apache.APACHE_MIRROR}/${download_path}"
+                archive = "${apache.APACHE_ARCHIVE}/${download_path}" }
+    }
   }
 }

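For context, the new bigtop.bom entry above composes the Ambari source tarball location from the interpolated fields. A rough sketch of how those fields resolve, using a hypothetical mirror URL (the mirror host below is an assumption, not a value from the repo):

    # Hypothetical mirror; the real values come from the apache section of bigtop.bom.
    APACHE_MIRROR = "https://www.apache.org/dyn/closer.cgi"
    name, base = "ambari", "2.4.2"

    download_path = "/{0}/{0}-{1}/".format(name, base)
    site          = APACHE_MIRROR + download_path
    destination   = "apache-{0}-{1}-src.tar.gz".format(name, base)

    print(site + destination)
    # e.g. https://www.apache.org/dyn/closer.cgi/ambari/ambari-2.4.2/apache-ambari-2.4.2-src.tar.gz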

[36/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
deleted file mode 100755
index 59ff82b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,579 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description> The address of ResourceManager. </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-    <display-name>Minimum Container Size (Memory)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>5120</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-    <display-name>Maximum Container Size (Memory)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-    <display-name>Memory allocated for all YARN containers on a node</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>268435456</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio of virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-    <display-name>Virtual Memory Ratio</display-name>
-    <value-attributes>
-      <type>float</type>
-      <minimum>0.1</minimum>
-      <maximum>5.0</maximum>
-      <increment-step>0.1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-    <depends-on>
-      <property>
-        <type>cluster-env</type>
-        <name>user_group</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and
-      cannot start with numbers.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_{$contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-    <display-name>Enable Log Aggregation</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregation logs before deleting them. -1 disables.
-      Be careful: setting this too small will spam the name node.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; i.e.,
-      if fewer healthy local-dirs (or log-dirs) are available,
-      then new containers will not be launched on this node.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for AM.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.https.address</name>
-    <value>localhost:8090</value>
-    <description>
-      The https address of the RM web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on http) and HTTPS_ONLY (service is provided only on https).
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for history store, defaulting to file system store
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/var/log/hadoop-yarn/timeline</value>
-    <description>
-      Store file name for leveldb timeline store
-    </description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>localhost:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>localhost:8190</value>
-    <description>
-      The https address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>localhost:10200</value>
-    <description>
-      This is the default address for the timeline server to start
-      the RPC server.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <description>Enable age off of timeline store data.</description>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <value>2678400000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <value>300000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.recovery.enabled</name>
-    <description>
-      Enable timeline server to recover state after starting. If
-      true, then yarn.timeline-service.state-store-class must be specified.
-    </description>
-    <value>true</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description>Whether ACLs are enabled.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.authorization-provider</name>
-    <description> Yarn authorization provider class. </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>yarn</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!--ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
-    <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
-    <value>/ats/active/</value>
-    <description>DFS path to store active application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
-    <value>/ats/done/</value>
-    <description>DFS path to store done application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
-    <value/>
-    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- advanced ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
-    <description>Summary storage for ATS v1.5</description>
-    <!-- Use rolling leveldb, advanced -->
-    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage reader. This
-      value controls how frequently the reader will scan the HDFS active directory
-      for application status.
-    </description>
-    <!-- Default is 60 seconds, advanced -->
-    <value>60</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
-    <description>
-      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
-      value controls how frequently the cleaner will scan the HDFS done directory
-      for stale application data.
-    </description>
-    <!-- 3600 is default, advanced -->
-    <value>3600</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
-    <description>
-      How long the ATS v1.5 entity group file system storage will keep an
-      application's data in the done directory.
-    </description>
-    <!-- 7 days is default, advanced -->
-    <value>604800</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>

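As a back-of-the-envelope check on the memory settings above (5120 MB per NodeManager, 512 MB minimum and 5120 MB maximum container size, vmem-pmem ratio 2.1), the sketch below shows the resulting container limits; it is illustrative only and not part of the stack definition. Note that yarn.nodemanager.vmem-check-enabled is set to false above, so the virtual-memory cap is not actually enforced.

    # Illustrative arithmetic using the yarn-site values shown above.
    node_memory_mb  = 5120   # yarn.nodemanager.resource.memory-mb
    min_alloc_mb    = 512    # yarn.scheduler.minimum-allocation-mb
    max_alloc_mb    = 5120   # yarn.scheduler.maximum-allocation-mb
    vmem_pmem_ratio = 2.1    # yarn.nodemanager.vmem-pmem-ratio

    containers_at_min = node_memory_mb // min_alloc_mb   # 10 containers of 512 MB
    vmem_cap_mb       = max_alloc_mb * vmem_pmem_ratio   # 10752.0 MB virtual memory for a max-size container

    print(containers_at_min, vmem_cap_mb)                # 10 10752.0
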
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
deleted file mode 100755
index 4093431..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
+++ /dev/null
@@ -1,214 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "false",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

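The service principals above use the _HOST placeholder (for example nm/_HOST@${realm}), which is substituted with the local FQDN at runtime; the NodeManager health alert later in this series performs the same substitution explicitly. A minimal sketch of that substitution, with a placeholder realm that is not taken from the repo:

    import socket

    # Placeholder realm for illustration; the real value comes from ${realm}.
    principal_template = "nm/_HOST@EXAMPLE.COM"
    host_name = socket.getfqdn()

    principal = principal_template.replace("_HOST", host_name)
    print(principal)   # e.g. nm/node1.example.com@EXAMPLE.COM
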
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
deleted file mode 100755
index 3e79bc5..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,317 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <displayName>YARN</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.7.1+odpi</version>
-      <components>
-
-      <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <displayName>ResourceManager</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>yarn_resourcemanager</logId>
-              <primary>true</primary>
-            </log>
-            <log>
-              <logId>yarn_historyserver</logId>
-            </log>
-            <log>
-              <logId>yarn_jobsummary</logId>
-            </log>
-          </logs>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REFRESHQUEUES</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-            <config-type>hdfs-site</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <displayName>NodeManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <decommissionAllowed>true</decommissionAllowed>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-           <bulkCommands>
-             <displayName>NodeManagers</displayName>
-             <!-- Used by decommission and recommission -->
-             <masterComponent>RESOURCEMANAGER</masterComponent>
-           </bulkCommands>
-          <logs>
-            <log>
-              <logId>yarn_nodemanager</logId>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <displayName>YARN Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>yarn-site.xml</fileName>
-              <dictionaryName>yarn-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>yarn-env.sh</fileName>
-              <dictionaryName>yarn-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>capacity-scheduler.xml</fileName>
-              <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile>                        
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-        <service>MAPREDUCE2</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>yarn-log4j</config-type>
-        <config-type>ams-ssl-client</config-type>
-        <config-type>ranger-yarn-plugin-properties</config-type>
-        <config-type>ranger-yarn-audit</config-type>
-        <config-type>ranger-yarn-policymgr-ssl</config-type>
-        <config-type>ranger-yarn-security</config-type>
-      </configuration-dependencies>
-      <widgetsFileName>YARN_widgets.json</widgetsFileName>
-      <metricsFileName>YARN_metrics.json</metricsFileName>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.7.1+odpi</version>
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <displayName>History Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>YARN/RESOURCEMANAGER</co-locate>
-          </auto-deploy>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>mapred_historyserver</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <displayName>MapReduce2 Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>mapred-site.xml</fileName>
-              <dictionaryName>mapred-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>mapred-env.sh</fileName>
-              <dictionaryName>mapred-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dir>configuration-mapred</configuration-dir>
-
-      <configuration-dependencies>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-env</config-type>
-        <config-type>ssl-client</config-type>
-        <config-type>ssl-server</config-type>
-        <config-type>ams-ssl-client</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
-      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
-      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
deleted file mode 100755
index c26dcc7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
+++ /dev/null
@@ -1 +0,0 @@
-51572fff0a03b67b13f41bbe7c55c4c2b682d089
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
deleted file mode 100755
index d7159e4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import socket
-import urllib2
-import logging
-import traceback
-from ambari_commons import OSCheck
-from ambari_commons.inet_utils import resolve_address
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
-
-OK_MESSAGE = 'NodeManager Healthy'
-CRITICAL_CONNECTION_MESSAGE = 'Connection failed to {0} ({1})'
-CRITICAL_HTTP_STATUS_MESSAGE = 'HTTP {0} returned from {1} ({2}) \n{3}'
-CRITICAL_NODEMANAGER_STATUS_MESSAGE = 'NodeManager returned an unexpected status of "{0}"'
-CRITICAL_NODEMANAGER_UNKNOWN_JSON_MESSAGE = 'Unable to determine NodeManager health from unexpected JSON response'
-
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-NODEMANAGER_DEFAULT_PORT = 8042
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health on {0} fails:"
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
-  YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
-  
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  result_code = RESULT_CODE_UNKNOWN
-
-  if configurations is None:
-    return (result_code, ['There were no configurations supplied to the script.'])
-
-  if host_name is None:
-    host_name = socket.getfqdn()
-
-  scheme = 'http'
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
-    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
-
-  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
-    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
-
-  if YARN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[YARN_HTTP_POLICY_KEY]
-
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-
-  # determine the right URI and whether to use SSL
-  host_port = http_uri
-  if http_policy == 'HTTPS_ONLY':
-    scheme = 'https'
-
-    if https_uri is not None:
-      host_port = https_uri
-
-  label = ''
-  url_response = None
-  node_healthy = 'false'
-  total_time = 0
-
-  # replace the hostname with the host fqdn so the check works in all environments
-  if host_port is not None:
-    if ":" in host_port:
-      uri_host, uri_port = host_port.split(':')
-      host_port = '{0}:{1}'.format(host_name, uri_port)
-    else:
-      host_port = host_name
-
-  # some yarn-site structures don't have the web ui address
-  if host_port is None:
-    host_port = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT)
-
-  query = "{0}://{1}/ws/v1/node/info".format(scheme, host_port)
-
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      # curl requires an integer timeout
-      curl_connection_timeout = int(connection_timeout)
-
-      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-        query, "nm_health_alert", executable_paths, False, "NodeManager Health", smokeuser,
-        connection_timeout=curl_connection_timeout, kinit_timer_ms = kinit_timer_ms)
-
-      json_response = json.loads(url_response)
-    else:
-      # execute the query for the JSON that includes the NodeManager status
-      url_response = urllib2.urlopen(query, timeout=connection_timeout)
-      json_response = json.loads(url_response.read())
-  except urllib2.HTTPError, httpError:
-    label = CRITICAL_HTTP_STATUS_MESSAGE.format(str(httpError.code), query,
-      str(httpError), traceback.format_exc())
-
-    return (RESULT_CODE_CRITICAL, [label])
-  except:
-    label = CRITICAL_CONNECTION_MESSAGE.format(query, traceback.format_exc())
-    return (RESULT_CODE_CRITICAL, [label])
-
-  # URL response received, parse it
-  try:
-    node_healthy = json_response['nodeInfo']['nodeHealthy']
-    node_healthy_report = json_response['nodeInfo']['healthReport']
-
-    # convert boolean to string
-    node_healthy = str(node_healthy)
-  except:
-    return (RESULT_CODE_CRITICAL, [query + "\n" + traceback.format_exc()])
-  finally:
-    if url_response is not None:
-      try:
-        url_response.close()
-      except:
-        pass
-
-  # proper JSON received, compare against known value
-  if node_healthy.lower() == 'true':
-    result_code = RESULT_CODE_OK
-    label = OK_MESSAGE
-  elif node_healthy.lower() == 'false':
-    result_code = RESULT_CODE_CRITICAL
-    label = node_healthy_report
-  else:
-    result_code = RESULT_CODE_CRITICAL
-    label = CRITICAL_NODEMANAGER_STATUS_MESSAGE.format(node_healthy)
-
-  return (result_code, [label])
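
For reference, the alert above reduces to one REST call against the NodeManager
web UI. A minimal standalone sketch of that call (not part of the deleted script;
the localhost:8042 address and plain-HTTP access are assumptions, and it targets
Python 2 like the rest of this package):

import json
import urllib2

def check_nodemanager_health(host_port='localhost:8042', timeout=5.0):
  # GET /ws/v1/node/info and inspect the nodeHealthy flag and healthReport text
  url = 'http://{0}/ws/v1/node/info'.format(host_port)
  response = urllib2.urlopen(url, timeout=timeout)
  try:
    node_info = json.loads(response.read())['nodeInfo']
  finally:
    response.close()
  return bool(node_info['nodeHealthy']), node_info.get('healthReport', '')

if __name__ == '__main__':
  healthy, report = check_nodemanager_health()
  print 'healthy={0} report={1}'.format(healthy, report)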

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
deleted file mode 100755
index adf27ec..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import urllib2
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and provides the same set of functions.
-import logging
-import traceback
-
-from ambari_commons.urllib_handlers import RefreshHeaderProcessor
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-from resource_management.core.environment import Environment
-
-ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
-OK_LABEL = 'All NodeManagers are healthy'
-
-NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
-NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
-YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
-
-KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
-KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-
-LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health Summary on {0} fails:"
-logger = logging.getLogger('ambari_alerts')
-
-QRY = "Hadoop:service=ResourceManager,name=RMNMInfo"
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS, \
-    YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  scheme = 'http'  
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  executable_paths = None
-  if EXECUTABLE_SEARCH_PATHS in configurations:
-    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
-
-  kerberos_keytab = None
-  if KERBEROS_KEYTAB in configurations:
-    kerberos_keytab = configurations[KERBEROS_KEYTAB]
-
-  kerberos_principal = None
-  if KERBEROS_PRINCIPAL in configurations:
-    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
-    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
-
-  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
-    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
-
-  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
-    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
-
-  if YARN_HTTP_POLICY_KEY in configurations:
-    http_policy = configurations[YARN_HTTP_POLICY_KEY]
-    
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-
-  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-  # determine the right URI and whether to use SSL
-  uri = http_uri
-  if http_policy == 'HTTPS_ONLY':
-    scheme = 'https'
-
-    if https_uri is not None:
-      uri = https_uri
-
-  uri = str(host_name) + ":" + uri.split(":")[1]
-  live_nodemanagers_qry = "{0}://{1}/jmx?qry={2}".format(scheme, uri, QRY)
-  convert_to_json_failed = False
-  response_code = None
-  try:
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      env = Environment.get_instance()
-
-      # curl requires an integer timeout
-      curl_connection_timeout = int(connection_timeout)
-
-      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-        live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
-        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      try:
-        url_response_json = json.loads(url_response)
-        live_nodemanagers = json.loads(find_value_in_jmx(url_response_json, "LiveNodeManagers", live_nodemanagers_qry))
-      except ValueError, error:
-        convert_to_json_failed = True
-        logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
-        format("NodeManager Health Summary", str(error)))
-
-      if convert_to_json_failed:
-        response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
-          live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
-          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
-          kinit_timer_ms = kinit_timer_ms)
-    else:
-      live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry,
-      "LiveNodeManagers", connection_timeout))
-
-    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
-      if response_code in [200, 307] and convert_to_json_failed:
-        return ('UNKNOWN', ['HTTP {0} response (metrics unavailable)'.format(str(response_code))])
-      elif convert_to_json_failed and response_code not in [200, 307]:
-        raise Exception("[Alert][NodeManager Health Summary] Getting data from {0} failed with http code {1}".format(
-          str(live_nodemanagers_qry), str(response_code)))
-
-    unhealthy_count = 0
-
-    for nodemanager in live_nodemanagers:
-      health_report = nodemanager['State']
-      if health_report == 'UNHEALTHY':
-        unhealthy_count += 1
-
-    if unhealthy_count == 0:
-      result_code = 'OK'
-      label = OK_LABEL
-    else:
-      result_code = 'CRITICAL'
-      if unhealthy_count == 1:
-        label = ERROR_LABEL.format(unhealthy_count, '', 'is')
-      else:
-        label = ERROR_LABEL.format(unhealthy_count, 's', 'are')
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return (result_code, [label])
-
-
-def get_value_from_jmx(query, jmx_property, connection_timeout):
-  response = None
-  
-  try:
-    # use a custom header processor that will look for the non-standard
-    # "Refresh" header and attempt to follow the redirect
-    url_opener = urllib2.build_opener(RefreshHeaderProcessor())
-    response = url_opener.open(query, timeout=connection_timeout)
-
-    data = response.read()
-    data_dict = json.loads(data)
-    return find_value_in_jmx(data_dict, jmx_property, query)
-  finally:
-    if response is not None:
-      try:
-        response.close()
-      except:
-        pass
-
-
-def find_value_in_jmx(data_dict, jmx_property, query):
-  json_data = data_dict["beans"][0]
-
-  if jmx_property not in json_data:
-    beans = data_dict['beans']
-    for jmx_prop_list_item in beans:
-      if "name" in jmx_prop_list_item and jmx_prop_list_item["name"] == QRY:
-        if jmx_property not in jmx_prop_list_item:
-          raise Exception("Unable to find {0} in JSON from {1} ".format(jmx_property, query))
-        json_data = jmx_prop_list_item
-
-  return json_data[jmx_property]
\ No newline at end of file
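
The summary alert above hinges on a single ResourceManager JMX bean,
Hadoop:service=ResourceManager,name=RMNMInfo, whose LiveNodeManagers attribute is a
JSON-encoded list of node descriptors carrying a State field. A minimal unsecured
sketch of that query (the localhost:8088 address is an assumption, and the
Kerberos/SPNEGO path handled by curl_krb_request is deliberately omitted):

import json
import urllib2

QRY = 'Hadoop:service=ResourceManager,name=RMNMInfo'

def count_unhealthy_nodemanagers(rm_address='localhost:8088', timeout=5.0):
  url = 'http://{0}/jmx?qry={1}'.format(rm_address, QRY)
  response = urllib2.urlopen(url, timeout=timeout)
  try:
    beans = json.loads(response.read())['beans']
  finally:
    response.close()
  # LiveNodeManagers is itself a JSON string, so it needs a second json.loads
  live_nodemanagers = json.loads(beans[0]['LiveNodeManagers'])
  return sum(1 for nm in live_nodemanagers if nm['State'] == 'UNHEALTHY')

if __name__ == '__main__':
  print '{0} unhealthy NodeManager(s)'.format(count_unhealthy_nodemanagers())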

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
deleted file mode 100755
index 5e2b4d9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and provides the same set of functions.
-import urllib2
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-#Return the response for the given path and address
-def getResponse(path, address, ssl_enabled):
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-
-  try:
-    handle = urllib2.urlopen(url)
-    output = handle.read()
-    handle.close()
-    response = json.loads(output)
-    if response == None:
-      print 'There is no response for url: ' + str(url)
-      exit(1)
-    return response
-  except Exception as e:
-    print 'Error getting response for url: ' + str(url), e
-    exit(1)
-
-#Verify that REST api is available for given component
-def validateAvailability(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAvailabilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking availability status of component', e
-    exit(1)
-
-#Validate component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating availability response for ' + str(component), e
-    return False
-
-#Verify that component has required resources to work
-def validateAbility(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAbilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking ability of component', e
-    exit(1)
-
-#Validate the component-specific response to check that it has the resources required to work
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There are no NodeManagers connected to the ResourceManager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if active_nodes_count == 0:
-        print 'There are no active NodeManagers connected to the ResourceManager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validating ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-
-  address = options.address
-  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()
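
For context, the validator above takes the component shorthand (rm, nm or hs) as its
positional argument, the web address via -p/--port and the SSL flag via -s/--ssl.
Illustrative invocations (the host names and ports are examples only, not values taken
from a stack configuration):

python validateYarnComponentStatusWindows.py rm -p rm-host:8088 -s false
python validateYarnComponentStatusWindows.py nm -p nm-host:8042 -s false
python validateYarnComponentStatusWindows.py hs -p hs-host:19888 -s false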

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
deleted file mode 100755
index 35de4bb..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
deleted file mode 100755
index 4ec6aa7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
-  FILE_TYPE_XML
-from resource_management.libraries.functions.format import format
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class ApplicationTimelineServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('timelineserver', action='start')
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('timelineserver', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='apptimelineserver')
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ApplicationTimelineServerWindows(ApplicationTimelineServer):
-  def status(self, env):
-    service('timelineserver', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ApplicationTimelineServerDefault(ApplicationTimelineServer):
-  def get_component_name(self):
-    return "hadoop-yarn-timelineserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-timelineserver", params.version)
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    Execute(format("mv {yarn_historyserver_pid_file_old} {yarn_historyserver_pid_file}"),
-            only_if = format("test -e {yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
-    functions.check_process_status(status_params.yarn_historyserver_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-if __name__ == "__main__":
-  ApplicationTimelineServer().execute()
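
All of these component scripts follow the same resource_management Script lifecycle:
the Ambari agent instantiates the class named at the bottom of the file and invokes
install, configure, start, stop and status on it. A stripped-down sketch of that shape
(the class name and service handle below are illustrative placeholders, and the
resource_management library is assumed to be available on the agent's Python path):

from resource_management.libraries.script.script import Script

class ExampleComponent(Script):
  def install(self, env):
    # install the OS packages declared for this component in the stack metainfo
    self.install_packages(env)

  def configure(self, env):
    import params
    env.set_params(params)
    # render config files here (XmlConfig, File, Directory resources, ...)

  def start(self, env, upgrade_type=None):
    self.configure(env)  # the scripts above always re-render configs before starting
    # service('examplecomponent', action='start') would follow here

  def stop(self, env, upgrade_type=None):
    pass  # service('examplecomponent', action='stop')

  def status(self, env):
    pass  # check_process_status(<pid file>) in the real scripts

if __name__ == "__main__":
  ExampleComponent().execute()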

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
deleted file mode 100755
index 34c683a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
+++ /dev/null
@@ -1,190 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-
-from install_jars import install_tez_jars
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class HistoryServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('historyserver', action='stop', serviceName='mapreduce')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="historyserver")
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HistoryserverWindows(HistoryServer):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('historyserver', action='start', serviceName='mapreduce')
-
-  def status(self, env):
-    service('historyserver', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HistoryServerDefault(HistoryServer):
-  def get_component_name(self):
-    return "hadoop-mapreduce-historyserver"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-mapreduce-historyserver", params.version)
-      # MC Hammer said, "Can't touch this"
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      params.HdfsResource(None, action="execute")
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
-      # MC Hammer said, "Can't touch this"
-      resource_created = copy_to_hdfs(
-        "mapreduce",
-        params.user_group,
-        params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped)
-      resource_created = copy_to_hdfs(
-        "tez",
-        params.user_group,
-        params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped) or resource_created
-      resource_created = copy_to_hdfs(
-        "slider",
-        params.user_group,
-        params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped) or resource_created
-      if resource_created:
-        params.HdfsResource(None, action="execute")
-    else:
-      # In stack versions that predate copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
-      install_tez_jars()
-
-    service('historyserver', action='start', serviceName='mapreduce')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.mapred_historyserver_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.mapred_log_dir
-
-  def get_user(self):
-    import params
-    return params.mapred_user
-
-if __name__ == "__main__":
-  HistoryServer().execute()
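
One pattern worth calling out in start() above: copy_to_hdfs() only declares pending
HDFS work and returns whether anything new was scheduled; nothing is written until
HdfsResource(None, action="execute") flushes the accumulated batch. A condensed sketch
of that batching idiom (it assumes it runs inside an Ambari agent script whose params
module provides HdfsResource, user_group, hdfs_user and host_sys_prepped):

from resource_management.libraries.functions.copy_tarball import copy_to_hdfs

def push_tarballs(params):
  resource_created = False
  for tarball in ("mapreduce", "tez", "slider"):
    resource_created = copy_to_hdfs(tarball,
                                    params.user_group,
                                    params.hdfs_user,
                                    host_sys_prepped=params.host_sys_prepped) or resource_created
  if resource_created:
    # one flush performs all of the queued HDFS operations
    params.HdfsResource(None, action="execute")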

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
deleted file mode 100755
index 44015bf..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import os
-import glob
-
-def install_tez_jars():
-  import params
-
-  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
-
-  # If tez libraries are to be stored in hdfs
-  if destination_hdfs_dirs:
-    for hdfs_dir in destination_hdfs_dirs:
-      params.HdfsResource(hdfs_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           owner=params.tez_user,
-                           mode=0755
-      )
-
-    app_dir_path = None
-    lib_dir_path = None
-
-    if len(destination_hdfs_dirs) > 0:
-      for path in destination_hdfs_dirs:
-        if 'lib' in path:
-          lib_dir_path = path
-        else:
-          app_dir_path = path
-        pass
-      pass
-    pass
-
-    tez_jars = {}
-    if app_dir_path:
-      tez_jars[params.tez_local_api_jars] = app_dir_path
-    if lib_dir_path:
-      tez_jars[params.tez_local_lib_jars] = lib_dir_path
-
-    for src_file_regex, dest_dir in tez_jars.iteritems():
-      for src_filepath in glob.glob(src_file_regex):
-        src_filename = os.path.basename(src_filepath)
-        params.HdfsResource(format("{dest_dir}/{src_filename}"),
-                            type="file",
-                            action="create_on_execute",
-                            source=src_filepath,
-                            mode=0755,
-                            owner=params.tez_user
-         )
-        
-    for src_file_regex, dest_dir in tez_jars.iteritems():
-      for src_filepath in glob.glob(src_file_regex):
-        src_filename = os.path.basename(src_filepath)
-        params.HdfsResource(format("{dest_dir}/{src_filename}"),
-                            type="file",
-                            action="create_on_execute",
-                            source=src_filepath,
-                            mode=0755,
-                            owner=params.tez_user
-         )
-    params.HdfsResource(None, action="execute")
-
-
-def get_tez_hdfs_dir_paths(tez_lib_uris = None):
-  hdfs_path_prefix = 'hdfs://'
-  lib_dir_paths = []
-  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
-    dir_paths = tez_lib_uris.split(',')
-    for path in dir_paths:
-      if not "tez.tar.gz" in path:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
-        lib_dir_paths.append(lib_dir_path)
-      else:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_paths.append(os.path.dirname(lib_dir_path))
-    pass
-  pass
-
-  return lib_dir_paths


[37/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
deleted file mode 100755
index 4b76a17..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
+++ /dev/null
@@ -1,611 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_yarn_dashboard",
-      "display_name": "Standard YARN Dashboard",
-      "section_name": "YARN_SUMMARY",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Memory Utilization",
-          "description": "Percentage of total memory allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
-              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory Utilization",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "CPU Utilization",
-          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
-              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
-              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized across NodeManager",
-              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "Percentage of all containers failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
-              "metric_path": "metrics/yarn/ContainersIniting._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
-              "metric_path": "metrics/yarn/ContainersRunning._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "App Failures",
-          "description": "Percentage of all launched applications failing in the cluster.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
-              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            },
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
-              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "App Failures",
-              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Pending Apps",
-          "description": "Count of applications waiting for cluster resources to become available.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
-              "metric_path": "metrics/yarn/Queue/root/AppsPending",
-              "service_name": "YARN",
-              "component_name": "RESOURCEMANAGER",
-              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
-            }
-          ],
-          "values": [
-            {
-              "name": "Pending Apps",
-              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Apps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Memory",
-          "description": "Percentage of memory used across all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "mem_total._sum",
-              "metric_path": "metrics/memory/mem_total._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "mem_free._sum",
-              "metric_path": "metrics/memory/mem_free._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Memory utilization",
-              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Disk",
-          "description": "Sum of disk throughput for all NodeManager hosts.",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "read_bps._sum",
-              "metric_path": "metrics/disk/read_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "write_bps._sum",
-              "metric_path": "metrics/disk/write_bps._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Read throughput",
-              "value": "${read_bps._sum/1048576}"
-            },
-            {
-              "name": "Write throughput",
-              "value": "${write_bps._sum/1048576}"
-            }
-          ],
-          "properties": {
-            "display_unit": "Mbps",
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster Network",
-          "description": "Average of Network utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "pkts_in._avg",
-              "metric_path": "metrics/network/pkts_in._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "pkts_out._avg",
-              "metric_path": "metrics/network/pkts_out._avg",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Packets In",
-              "value": "${pkts_in._avg}"
-            },
-            {
-              "name": "Packets Out",
-              "value": "${pkts_out._avg}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1"
-          }
-        },
-        {
-          "widget_name": "Cluster CPU",
-          "description": "Percentage of CPU utilized across all NodeManager hosts.",
-          "default_section_name": "YARN_SUMMARY",
-          "widget_type": "GRAPH",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "cpu_system._sum",
-              "metric_path": "metrics/cpu/cpu_system._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_user._sum",
-              "metric_path": "metrics/cpu/cpu_user._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_nice._sum",
-              "metric_path": "metrics/cpu/cpu_nice._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_idle._sum",
-              "metric_path": "metrics/cpu/cpu_idle._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "cpu_wio._sum",
-              "metric_path": "metrics/cpu/cpu_wio._sum",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "CPU utilization",
-              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
-            }
-          ],
-          "properties": {
-            "graph_type": "LINE",
-            "time_range": "1",
-            "display_unit": "%"
-          }
-        }
-      ]
-    },
-    {
-      "layout_name": "default_yarn_heatmap",
-      "display_name": "YARN Heatmaps",
-      "section_name": "YARN_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableGB",
-              "metric_path": "metrics/yarn/AvailableGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable RAM Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.AvailableVCores",
-              "metric_path": "metrics/yarn/AvailableVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Total Allocatable CPU Utilized per NodeManager",
-              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Container Failures",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersIniting",
-              "metric_path": "metrics/yarn/ContainersIniting",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            },
-            {
-              "name": "yarn.NodeManagerMetrics.ContainersRunning",
-              "metric_path": "metrics/yarn/ContainersRunning",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager GC Time",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-              "metric_path": "metrics/jvm/gcTimeMillis",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager Garbage Collection Time",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
-            }
-          ],
-          "properties": {
-            "display_unit": "ms",
-            "max_limit": "10000"
-          }
-        },
-        {
-          "widget_name": "NodeManager JVM Heap Memory Used",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-              "metric_path": "metrics/jvm/memHeapUsedM",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager JVM Heap Memory Used",
-              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
-            }
-          ],
-          "properties": {
-            "display_unit": "MB",
-            "max_limit": "512"
-          }
-        },
-        {
-          "widget_name": "Allocated Containers",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
-              "metric_path": "metrics/yarn/AllocatedContainers",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "Allocated Containers",
-              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager RAM Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedGB",
-              "metric_path": "metrics/yarn/AllocatedGB",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager RAM Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "NodeManager CPU Utilized",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
-              "metric_path": "metrics/yarn/AllocatedVCores",
-              "service_name": "YARN",
-              "component_name": "NODEMANAGER"
-            }
-          ],
-          "values": [
-            {
-              "name": "NodeManager CPU Utilized",
-              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
-            }
-          ],
-          "properties": {
-            "display_unit": "",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
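
The heatmap "value" fields above are arithmetic expressions over the listed per-NodeManager metrics, capped by max_limit. A minimal, hypothetical Python sketch of the RAM-utilization expression follows; the function name and sample numbers are illustrative only and are not taken from Ambari.

def nm_ram_utilization_percent(allocated_gb, available_gb):
    """Mirror of ${(AllocatedGB / (AvailableGB + AllocatedGB)) * 100} for one NodeManager."""
    total = float(allocated_gb + available_gb)
    if total == 0:
        return 0.0  # avoid division by zero on a node reporting no capacity
    return allocated_gb / total * 100.0

if __name__ == "__main__":
    # e.g. 6 GB allocated out of 6 + 10 GB allocatable capacity -> 37.5 (%)
    print(round(nm_ram_utilization_percent(6, 10), 1))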

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
deleted file mode 100755
index 8561922..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
+++ /dev/null
@@ -1,418 +0,0 @@
-{
-  "MAPREDUCE2": {
-    "service": [],
-    "HISTORYSERVER": [
-      {
-        "name": "mapreduce_history_server_webui",
-        "label": "History Server Web UI",
-        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_cpu",
-        "label": "History Server CPU Utilization",
-        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_rpc_latency",
-        "label": "History Server RPC Latency",
-        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
-            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "mapreduce_history_server_process",
-        "label": "History Server Process",
-        "description": "This host-level alert is triggered if the History Server process cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-          "default_port": 19888,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  },
-  "YARN": {
-    "service": [
-      {
-        "name": "yarn_nodemanager_webui_percent",
-        "label": "Percent NodeManagers Available",
-        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "yarn_nodemanager_webui",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 10
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 30
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          }
-        }
-      }
-    ],
-    "NODEMANAGER": [
-      {
-        "name": "yarn_nodemanager_webui",
-        "label": "NodeManager Web UI",
-        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "default_port": 8042,
-            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "yarn_nodemanager_health",
-        "label": "NodeManager Health",
-        "description": "This host-level alert checks the node health property available from the NodeManager component.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "RESOURCEMANAGER": [
-      {
-        "name": "yarn_resourcemanager_webui",
-        "label": "ResourceManager Web UI",
-        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_cpu",
-        "label": "ResourceManager CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%",
-            "type": "PERCENT"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "yarn_resourcemanager_rpc_latency",
-        "label": "ResourceManager RPC Latency",
-        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
-            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "connection_timeout": 5.0,
-            "high_availability": {
-              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
-              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
-              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
-            }
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "nodemanager_health_summary",
-        "label": "NodeManager Health Summary",
-        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
-          "parameters": [
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }
-    ],
-    "APP_TIMELINE_SERVER": [
-      {
-        "name": "yarn_app_timeline_server_webui",
-        "label": "App Timeline Web UI",
-        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline",
-            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
-            "https_property": "{{yarn-site/yarn.http.policy}}",
-            "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
-            "connection_timeout": 5.0
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning":{
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ]
-  }
-}
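
The {0}/{1}/{2:.3f}/{3} placeholders in the "reporting" text templates above are Python format fields filled in by the Ambari agent; judging from the templates themselves, {0} carries the HTTP status, {1} the target address, {2} the response time in seconds and {3} a reason string. A small illustrative sketch, with sample values that are purely assumptions:

OK_TEMPLATE = "HTTP {0} response in {2:.3f}s"
WARN_TEMPLATE = "HTTP {0} response from {1} in {2:.3f}s ({3})"
CRIT_TEMPLATE = "Connection failed to {1} ({3})"

if __name__ == "__main__":
    host = "nm-host.example.com:8042"  # hypothetical NodeManager web address
    print(OK_TEMPLATE.format(200, host, 0.0421, "OK"))
    # -> HTTP 200 response in 0.042s
    print(WARN_TEMPLATE.format(503, host, 1.7312, "Service Unavailable"))
    # -> HTTP 503 response from nm-host.example.com:8042 in 1.731s (Service Unavailable)
    print(CRIT_TEMPLATE.format("", host, 0.0, "Connection refused"))
    # -> Connection failed to nm-host.example.com:8042 (Connection refused)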

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
deleted file mode 100755
index fe6d4b9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <display-name>Mapreduce Log Dir Prefix</display-name>
-    <description>Mapreduce Log Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <display-name>Mapreduce PID Dir Prefix</display-name>
-    <description>Mapreduce PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <display-name>Mapreduce User</display-name>
-    <value>mapred</value>
-    <property-type>USER</property-type>
-    <description>Mapreduce User</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>jobhistory_heapsize</name>
-    <display-name>History Server heap size</display-name>
-    <value>900</value>
-    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
-    <value-attributes>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user_nofile_limit</name>
-    <value>32768</value>
-    <description>Max open files limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapred_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- mapred-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>mapred-env template</display-name>
-    <description>This is the jinja template for mapred-env.sh file</description>
-    <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
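
The "content" property above is described as a Jinja template for mapred-env.sh, with {{jobhistory_heapsize}} substituted at deploy time (900 by default). A hypothetical rendering sketch using jinja2, independent of the Ambari resource_management machinery:

from jinja2 import Template

# Two representative lines from the template above; the full template also
# carries the commented-out optional exports.
MAPRED_ENV_SNIPPET = (
    "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
    "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n"
)

if __name__ == "__main__":
    print(Template(MAPRED_ENV_SNIPPET).render(jobhistory_heapsize=900))
    # export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
    # export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA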

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
deleted file mode 100755
index 434eea0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,481 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- i/o properties -->
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>358</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-    <display-name>Sort Allocation Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2047</maximum>
-      <unit>MB</unit>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.map.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.counters.max</name>
-    <value>130</value>
-    <description>
-      Limit on the number of counters allowed per job.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory (relative to the maximum heap size) to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-    <description>
-      Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>512</value>
-    <description>Virtual memory for single Map task</description>
-    <display-name>Map Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-    <display-name>Reduce Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-    <display-name>AppMaster Memory</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>5120</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx410m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <display-name>MR AppMaster Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>yarn.app.mapreduce.am.resource.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-    <display-name>MR AppMaster Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>yarn.app.mapreduce.am.resource.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for map tasks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>This property stores Java options for reduce tasks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by resourcemanager. Otherwise, it will be overridden. The default number is
-      set to 2, to allow at least one retry for AM.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx410m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-    <display-name>MR Map Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.map.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-    <display-name>MR Reduce Java Heap Size</display-name>
-    <depends-on>
-      <property>
-        <type>mapred-site</type>
-        <name>mapreduce.reduce.memory.mb</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      This configures the HTTP endpoint for JobHistoryServer web UI.
-      The following values are supported:
-        HTTP_ONLY  : service is provided only on http
-        HTTPS_ONLY : service is provided only on https
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>mapreduce.job.queuename</name>
-    <value>default</value>
-    <description>
-      Queue to which a job is submitted.
-    </description>
-    <depends-on>
-      <property>
-        <type>capacity-scheduler</type>
-        <name>yarn.scheduler.capacity.root.queues</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
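
The defaults above pair mapreduce.map.memory.mb=512 with -Xmx410m and mapreduce.reduce.memory.mb=1024 with -Xmx756m, keeping the task heap at roughly 75-80% of the container size so non-heap memory still fits inside the YARN container. A rough, hypothetical calculator under that assumption (not Ambari's stack-advisor logic):

def task_xmx_mb(container_mb, heap_fraction=0.8):
    """Suggest an -Xmx value (MB) that leaves headroom inside a YARN container."""
    return int(container_mb * heap_fraction)

if __name__ == "__main__":
    for container in (512, 1024, 2048):
        print("%d MB container -> -Xmx%dm" % (container, task_xmx_mb(container)))
    # 512 MB container -> -Xmx409m
    # 1024 MB container -> -Xmx819m
    # 2048 MB container -> -Xmx1638m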

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100755
index 912113b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters, i.e. it controls the number of concurrently running
-      applications.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues capacity should add up to their parent queue's capacity
-      or less.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to the number of nodes in the cluster. By default it is set to
-      approximately the number of nodes in one rack, which is 40.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>
-      Default minimum queue resource limit depends on the number of users who have submitted applications.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
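
Per the descriptions above, yarn.scheduler.capacity.root.queues lists the child queues of root, and their capacities should add up to the parent queue's capacity or less (the lone default queue takes 100%). A hypothetical validation sketch of that constraint, not the CapacityScheduler's own check:

def child_capacities_fit(parent_capacity, child_capacities):
    """True if the children's capacity percentages do not exceed the parent's."""
    return sum(child_capacities) <= parent_capacity

if __name__ == "__main__":
    print(child_capacities_fit(100, [100]))     # True  - the default layout above
    print(child_capacities_fit(100, [60, 50]))  # False - over-allocated children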

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
deleted file mode 100755
index c3bbcb6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
+++ /dev/null
@@ -1,260 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <display-name>YARN Log Dir Prefix</display-name>
-    <description>YARN Log Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <display-name>YARN PID Dir Prefix</display-name>
-    <description>YARN PID Dir Prefix</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <display-name>Yarn User</display-name>
-    <value>yarn</value>
-    <property-type>USER</property-type>
-    <description>YARN User</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <display-name>YARN Java heap size</display-name>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <display-name>ResourceManager Java heap size</display-name>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <display-name>NodeManager Java heap size</display-name>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>min_user_id</name>
-    <value>1000</value>
-    <display-name>Minimum user ID for submitting job</display-name>
-    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>is_supported_yarn_ranger</name>
-    <value>false</value>
-    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN Plugin.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user_nofile_limit</name>
-    <value>32768</value>
-    <description>Max open files limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>yarn-env template</display-name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-export JAVA_HOME={{java64_home}}
-export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# For setting YARN specific HEAP sizes please use this
-# Parameter and set appropriately
-YARN_HEAPSIZE={{yarn_heapsize}}
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-# Specify the max Heapsize for the History Server using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_HISTORYSERVER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory and file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>service_check.queue.name</name>
-    <value>default</value>
-    <description>
-      The queue used by the service check.
-    </description>
-    <depends-on>
-      <property>
-        <type>capacity-scheduler</type>
-        <name>yarn.scheduler.capacity.root.queues</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
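
The yarn-env.sh template above resolves a daemon's -Xmx roughly as: daemon-specific
heapsize first (e.g. YARN_RESOURCEMANAGER_HEAPSIZE), then YARN_HEAPSIZE, then the
built-in 1000 MB default, with an explicit -Xmx in YARN_OPTS/YARN_*_OPTS overriding
all of them. The short Python sketch below only illustrates that precedence; the
function name and example values are made up for illustration and are not part of
the stack definition.

# Illustrative sketch of the -Xmx precedence described in the yarn-env.sh
# comments above (ignoring an explicit -Xmx in YARN_*_OPTS, which would
# override everything).
def resolve_xmx(daemon_heapsize_mb=None, yarn_heapsize_mb=None, default_mb=1000):
    """Return the -Xmx flag a YARN daemon would effectively start with."""
    if daemon_heapsize_mb:          # e.g. YARN_RESOURCEMANAGER_HEAPSIZE
        return "-Xmx%dm" % daemon_heapsize_mb
    if yarn_heapsize_mb:            # YARN_HEAPSIZE applies to all YARN daemons
        return "-Xmx%dm" % yarn_heapsize_mb
    return "-Xmx%dm" % default_mb   # JAVA_HEAP_MAX fallback

print(resolve_xmx(daemon_heapsize_mb=1024))  # -Xmx1024m
print(resolve_xmx(yarn_heapsize_mb=2048))    # -Xmx2048m
print(resolve_xmx())                         # -Xmx1000m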

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100755
index 89dd52d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>yarn-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#Relative to Yarn Log Dir Prefix
-yarn.log.dir=.
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# LEVEL,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-
-# Audit logging for ResourceManager
-rm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
-
-# Audit logging for NodeManager
-nm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>


[52/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
BIGTOP-1406. package Ambari in Bigtop


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/0d3448b8
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/0d3448b8
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/0d3448b8

Branch: refs/heads/master
Commit: 0d3448b812781488010f80febcbfe6e29af8d075
Parents: bf841ad
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Mon Feb 27 12:26:46 2017 -0800
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:12 2017 -0700

----------------------------------------------------------------------
 .../ODPi/1.0/blueprints/multinode-default.json  |  108 -
 .../ODPi/1.0/blueprints/singlenode-default.json |   65 -
 .../ODPi/1.0/configuration/cluster-env.xml      |  232 --
 .../src/common/ambari/ODPi/1.0/hooks/.hash      |    1 -
 .../1.0/hooks/after-INSTALL/scripts/hook.py     |   37 -
 .../1.0/hooks/after-INSTALL/scripts/params.py   |  101 -
 .../scripts/shared_initialization.py            |  108 -
 .../hooks/before-ANY/files/changeToSecureUid.sh |   53 -
 .../ODPi/1.0/hooks/before-ANY/scripts/hook.py   |   36 -
 .../ODPi/1.0/hooks/before-ANY/scripts/params.py |  230 --
 .../before-ANY/scripts/shared_initialization.py |  224 --
 .../1.0/hooks/before-INSTALL/scripts/hook.py    |   37 -
 .../1.0/hooks/before-INSTALL/scripts/params.py  |  113 -
 .../scripts/repo_initialization.py              |   68 -
 .../scripts/shared_initialization.py            |   37 -
 .../1.0/hooks/before-RESTART/scripts/hook.py    |   29 -
 .../hooks/before-START/files/checkForFormat.sh  |   65 -
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 19285850 -> 0 bytes
 .../before-START/files/task-log4j.properties    |  134 -
 .../hooks/before-START/files/topology_script.py |   66 -
 .../ODPi/1.0/hooks/before-START/scripts/hook.py |   39 -
 .../1.0/hooks/before-START/scripts/params.py    |  318 --
 .../before-START/scripts/rack_awareness.py      |   47 -
 .../scripts/shared_initialization.py            |  175 -
 .../templates/commons-logging.properties.j2     |   43 -
 .../templates/exclude_hosts_list.j2             |   21 -
 .../templates/hadoop-metrics2.properties.j2     |  104 -
 .../before-START/templates/health_check.j2      |   81 -
 .../templates/include_hosts_list.j2             |   21 -
 .../templates/topology_mappings.data.j2         |   24 -
 .../src/common/ambari/ODPi/1.0/kerberos.json    |   60 -
 .../src/common/ambari/ODPi/1.0/metainfo.xml     |   22 -
 .../ODPi/1.0/properties/stack_features.json     |   51 -
 .../ambari/ODPi/1.0/properties/stack_tools.json |    4 -
 .../common/ambari/ODPi/1.0/repos/repoinfo.xml   |   33 -
 .../ambari/ODPi/1.0/role_command_order.json     |   75 -
 .../ambari/ODPi/1.0/services/HDFS/metainfo.xml  |   27 -
 .../ambari/ODPi/1.0/services/HIVE/alerts.json   |  232 --
 .../services/HIVE/configuration/hcat-env.xml    |   41 -
 .../services/HIVE/configuration/hive-env.xml    |  540 ---
 .../HIVE/configuration/hive-exec-log4j.xml      |   96 -
 .../services/HIVE/configuration/hive-log4j.xml  |  106 -
 .../services/HIVE/configuration/hive-site.xml   | 2796 --------------
 .../HIVE/configuration/hivemetastore-site.xml   |   43 -
 .../HIVE/configuration/hiveserver2-site.xml     |  122 -
 .../services/HIVE/configuration/webhcat-env.xml |   38 -
 .../HIVE/configuration/webhcat-log4j.xml        |   63 -
 .../HIVE/configuration/webhcat-site.xml         |  287 --
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ----
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 ----
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 -------
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 -----
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 -----
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 --------
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 -
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 -
 .../ambari/ODPi/1.0/services/HIVE/kerberos.json |  132 -
 .../ambari/ODPi/1.0/services/HIVE/metainfo.xml  |  372 --
 .../alert_hive_interactive_thrift_port.py       |  216 --
 .../HIVE/package/alerts/alert_hive_metastore.py |  270 --
 .../package/alerts/alert_hive_thrift_port.py    |  274 --
 .../package/alerts/alert_llap_app_status.py     |  299 --
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 --
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ----
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 ----
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 -------
 .../services/HIVE/package/files/addMysqlUser.sh |   39 -
 .../services/HIVE/package/files/hcatSmoke.sh    |   41 -
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 -
 .../HIVE/package/files/hiveTezSetup.cmd         |   58 -
 .../services/HIVE/package/files/hiveserver2.sql |   23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 -
 .../1.0/services/HIVE/package/files/pigSmoke.sh |   18 -
 .../HIVE/package/files/removeMysqlUser.sh       |   33 -
 .../HIVE/package/files/startMetastore.sh        |   25 -
 .../HIVE/package/files/templetonSmoke.sh        |   58 -
 .../services/HIVE/package/scripts/__init__.py   |   19 -
 .../1.0/services/HIVE/package/scripts/hcat.py   |   81 -
 .../HIVE/package/scripts/hcat_client.py         |   85 -
 .../HIVE/package/scripts/hcat_service_check.py  |   86 -
 .../1.0/services/HIVE/package/scripts/hive.py   |  481 ---
 .../HIVE/package/scripts/hive_client.py         |   68 -
 .../HIVE/package/scripts/hive_interactive.py    |  302 --
 .../HIVE/package/scripts/hive_metastore.py      |  259 --
 .../HIVE/package/scripts/hive_server.py         |  211 --
 .../package/scripts/hive_server_interactive.py  |  535 ---
 .../HIVE/package/scripts/hive_server_upgrade.py |  141 -
 .../HIVE/package/scripts/hive_service.py        |  187 -
 .../package/scripts/hive_service_interactive.py |  109 -
 .../HIVE/package/scripts/mysql_server.py        |   64 -
 .../HIVE/package/scripts/mysql_service.py       |   49 -
 .../HIVE/package/scripts/mysql_users.py         |   70 -
 .../HIVE/package/scripts/mysql_utils.py         |   35 -
 .../1.0/services/HIVE/package/scripts/params.py |   29 -
 .../HIVE/package/scripts/params_linux.py        |  735 ----
 .../HIVE/package/scripts/params_windows.py      |   74 -
 .../HIVE/package/scripts/service_check.py       |  190 -
 .../HIVE/package/scripts/setup_ranger_hive.py   |   98 -
 .../scripts/setup_ranger_hive_interactive.py    |   78 -
 .../HIVE/package/scripts/status_params.py       |  123 -
 .../services/HIVE/package/scripts/webhcat.py    |  145 -
 .../HIVE/package/scripts/webhcat_server.py      |  164 -
 .../HIVE/package/scripts/webhcat_service.py     |   96 -
 .../package/scripts/webhcat_service_check.py    |  128 -
 .../hadoop-metrics2-hivemetastore.properties.j2 |   54 -
 .../hadoop-metrics2-hiveserver2.properties.j2   |   54 -
 .../templates/hadoop-metrics2-llapdaemon.j2     |   52 -
 .../hadoop-metrics2-llaptaskscheduler.j2        |   52 -
 .../HIVE/package/templates/hive.conf.j2         |   35 -
 .../package/templates/startHiveserver2.sh.j2    |   24 -
 .../templates/startHiveserver2Interactive.sh.j2 |   24 -
 .../package/templates/templeton_smoke.pig.j2    |   24 -
 .../ODPi/1.0/services/KERBEROS/metainfo.xml     |   26 -
 .../1.0/services/YARN/MAPREDUCE2_metrics.json   | 2596 -------------
 .../ODPi/1.0/services/YARN/YARN_metrics.json    | 3486 ------------------
 .../ODPi/1.0/services/YARN/YARN_widgets.json    |  611 ---
 .../ambari/ODPi/1.0/services/YARN/alerts.json   |  418 ---
 .../YARN/configuration-mapred/mapred-env.xml    |  105 -
 .../YARN/configuration-mapred/mapred-site.xml   |  481 ---
 .../YARN/configuration/capacity-scheduler.xml   |  130 -
 .../services/YARN/configuration/yarn-env.xml    |  260 --
 .../services/YARN/configuration/yarn-log4j.xml  |   94 -
 .../services/YARN/configuration/yarn-site.xml   |  579 ---
 .../ambari/ODPi/1.0/services/YARN/kerberos.json |  214 --
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |  317 --
 .../ambari/ODPi/1.0/services/YARN/package/.hash |    1 -
 .../package/alerts/alert_nodemanager_health.py  |  209 --
 .../alerts/alert_nodemanagers_summary.py        |  219 --
 .../files/validateYarnComponentStatusWindows.py |  161 -
 .../services/YARN/package/scripts/__init__.py   |   20 -
 .../scripts/application_timeline_server.py      |  155 -
 .../YARN/package/scripts/historyserver.py       |  190 -
 .../YARN/package/scripts/install_jars.py        |   99 -
 .../package/scripts/mapred_service_check.py     |  168 -
 .../YARN/package/scripts/mapreduce2_client.py   |   98 -
 .../YARN/package/scripts/nodemanager.py         |  161 -
 .../YARN/package/scripts/nodemanager_upgrade.py |   73 -
 .../1.0/services/YARN/package/scripts/params.py |   31 -
 .../YARN/package/scripts/params_linux.py        |  469 ---
 .../YARN/package/scripts/params_windows.py      |   59 -
 .../YARN/package/scripts/resourcemanager.py     |  289 --
 .../services/YARN/package/scripts/service.py    |  105 -
 .../YARN/package/scripts/service_check.py       |  159 -
 .../YARN/package/scripts/setup_ranger_yarn.py   |   71 -
 .../YARN/package/scripts/status_params.py       |   61 -
 .../1.0/services/YARN/package/scripts/yarn.py   |  499 ---
 .../YARN/package/scripts/yarn_client.py         |   67 -
 .../package/templates/container-executor.cfg.j2 |   40 -
 .../package/templates/exclude_hosts_list.j2     |   21 -
 .../YARN/package/templates/mapreduce.conf.j2    |   35 -
 .../package/templates/taskcontroller.cfg.j2     |   38 -
 .../YARN/package/templates/yarn.conf.j2         |   35 -
 .../ODPi/1.0/services/ZOOKEEPER/metainfo.xml    |   27 -
 .../ambari/ODPi/1.0/services/stack_advisor.py   | 1947 ----------
 .../src/common/ambari/ODPi/1.0/widgets.json     |   95 -
 .../src/common/ambari/install_ambari.sh         |    2 -
 .../src/deb/ambari/source/include-binaries      |    1 -
 bigtop.bom                                      |   10 +
 158 files changed, 10 insertions(+), 38545 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
deleted file mode 100755
index 53248e4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
+++ /dev/null
@@ -1,108 +0,0 @@
-{
-    "configurations" : [
-    ],
-    "host_groups" : [
-        {
-            "name" : "master_1",
-            "components" : [
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_2",
-            "components" : [
-
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_3",
-            "components" : [
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_4",
-            "components" : [
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "slave",
-            "components" : [
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                }
-            ],
-            "cardinality" : "${slavesCount}"
-        },
-        {
-            "name" : "gateway",
-            "components" : [
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-multinode-default",
-        "stack_name" : "ODPi",
-        "stack_version" : "1.0"
-    }
-}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
deleted file mode 100755
index 6aeb516..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
-    "configurations" : [
-    ],
-    "host_groups" : [
-        {
-            "name" : "host_group_1",
-            "components" : [
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "SUPERVISOR"
-                },
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "APP_TIMELINE_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                },
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                },
-                {
-                    "name" : "DRPC_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-singlenode-default",
-        "stack_name" : "ODPi",
-        "stack_version" : "1.0"
-    }
-}
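
The two blueprint files above are consumed through Ambari's blueprint REST API:
the blueprint is registered first, and a cluster-creation template then maps its
host groups onto concrete hosts. The sketch below shows that flow as an
illustration only; the server URL, credentials, host name, and cluster name are
placeholders and are not values shipped with this package.

# Illustrative sketch: registering the singlenode blueprint above and creating
# a cluster from it via Ambari's REST API. URL, credentials, host and cluster
# names are placeholders.
import json
import requests

AMBARI = "http://ambari-server.example.com:8080/api/v1"
AUTH = ("admin", "admin")                      # placeholder credentials
HEADERS = {"X-Requested-By": "ambari"}         # header required by the Ambari API

with open("singlenode-default.json") as fp:
    blueprint = json.load(fp)

# 1. Register the blueprint under the name the cluster template will refer to.
requests.post(AMBARI + "/blueprints/blueprint-singlenode-default",
              auth=AUTH, headers=HEADERS, data=json.dumps(blueprint))

# 2. Create a cluster by mapping the blueprint's host group onto a real host.
cluster_template = {
    "blueprint": "blueprint-singlenode-default",
    "host_groups": [
        {"name": "host_group_1", "hosts": [{"fqdn": "node1.example.com"}]},
    ],
}
requests.post(AMBARI + "/clusters/odpi-singlenode",
              auth=AUTH, headers=HEADERS, data=json.dumps(cluster_template))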

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
deleted file mode 100755
index 61274b6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
+++ /dev/null
@@ -1,232 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>recovery_enabled</name>
-    <value>true</value>
-    <description>Auto start enabled or not for this cluster.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_type</name>
-    <value>AUTO_START</value>
-    <description>Auto start type.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_lifetime_max_count</name>
-    <value>1024</value>
-    <description>Maximum lifetime count of auto-start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_max_count</name>
-    <value>6</value>
-    <description>Maximum count of auto-start recovery attempts allowed per host component within a window. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_window_in_minutes</name>
-    <value>60</value>
-    <description>Auto start recovery window size in minutes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_retry_interval</name>
-    <value>5</value>
-    <description>Auto start recovery retry gap between tries per host component.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <display-name>Skip group modifications during install</display-name>
-    <value>false</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <description>Whether to ignore failures during user and group creation</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser</name>
-    <display-name>Smoke User</display-name>
-    <value>ambari-qa</value>
-    <property-type>USER</property-type>
-    <description>User executing service checks</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser_keytab</name>
-    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
-    <description>Path to smoke test user keytab file</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>user_group</name>
-    <display-name>Hadoop Group</display-name>
-    <value>hadoop</value>
-    <property-type>GROUP</property-type>
-    <description>Hadoop user group.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_suse_rhel_template</name>
-    <value>[{{repo_id}}]
-name={{repo_id}}
-{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
-
-path=/
-enabled=1
-gpgcheck=0</value>
-    <description>Template of repositories for rhel and suse.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_ubuntu_template</name>
-    <value>{{package_type}} {{base_url}} {{components}}</value>
-    <description>Template of repositories for ubuntu.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>override_uid</name>
-    <value>true</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <display-name>Have Ambari manage UIDs</display-name>
-    <description>Have Ambari manage UIDs</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>fetch_nonlocal_groups</name>
-    <value>true</value>
-    <display-name>Ambari fetch nonlocal groups</display-name>
-    <description>Ambari requires fetching all the groups. This can be slow
-        in environments with LDAP enabled. Setting this option to false allows Ambari
-        to skip user/group management connected with LDAP groups.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>managed_hdfs_resource_property_names</name>
-    <value/>
-    <description>Comma-separated list of property names with HDFS resource paths.
-        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_tools</name>
-    <value/>
-    <description>Stack specific tools</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_tools.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_features</name>
-    <value/>
-    <description>List of features supported by the stack</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_features.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>stack_root</name>
-    <value>/usr/odpi</value>
-    <description>Stack root folder</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>alerts_repeat_tolerance</name>
-    <value>1</value>
-    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_bad_mounts</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs, this prevents Ambari from creating any directories.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>manage_dirs_on_root</name>
-    <value>true</value>
-    <description>For properties handled by handle_mounted_dirs, this makes Ambari manage (create and set permissions on) unknown directories on the / partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>one_dir_per_partition</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs this will make Ambari </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
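
The repo_suse_rhel_template property above is a Jinja template that the agent
expands into a yum/zypper .repo file. The snippet below is a minimal rendering
sketch using the jinja2 library directly, with made-up repository values; the
actual expansion and file placement are handled by Ambari's own resources.

# Minimal sketch of how repo_suse_rhel_template above expands; repo_id and
# base_url are made-up example values.
from jinja2 import Template

repo_template = (
    "[{{repo_id}}]\n"
    "name={{repo_id}}\n"
    "{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n"
    "\n"
    "path=/\n"
    "enabled=1\n"
    "gpgcheck=0"
)

print(Template(repo_template).render(
    repo_id="ODPi-1.0",
    mirror_list=None,
    base_url="http://repo.example.com/odpi/1.0"))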

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
deleted file mode 100755
index f8c8c1f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
+++ /dev/null
@@ -1 +0,0 @@
-18a52d08dc963523592f7f1f2997089b6655de71
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100755
index 8a583b3..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks()
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
deleted file mode 100755
index 819d8f7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-
-# default hadoop params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-  # not supported in HDP 2.2+
-  hadoop_conf_empty_dir = None
-
-versioned_stack_root = '/usr/hdp/current'
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)
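
Most values in the params module above come from default('/some/path', fallback)
lookups against the command JSON that Ambari sends to the agent. Conceptually the
helper walks the nested dictionary by slash-separated path and returns the fallback
when a key is absent, roughly as in the simplified sketch below (this is not the
actual resource_management implementation).

# Rough sketch of the slash-separated lookup pattern behind default(...);
# the real helper lives in resource_management and handles more edge cases.
def lookup(config, path, fallback=None):
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

command_json = {"commandParams": {"dfs_type": "HDFS"}}
print(lookup(command_json, "/commandParams/dfs_type", ""))            # HDFS
print(lookup(command_json, "/roleParams/upgrade_suspended", False))   # False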

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100755
index 9982dc6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks():
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    # try using the exact version first, falling back to just the stack if it's not defined,
-    # which would only be during an initial cluster installation
-    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-
-    if not params.upgrade_suspended:
-      # On parallel command execution this should be executed by a single process at a time.
-      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-        stack_select.select_all(version)
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-
-def load_version(struct_out_file):
-  """
-  Load version from file.  Made a separate method for testing
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Links configs, only on a fresh install of HDP-2.3 and higher
-  """
-  import params
-
-  if not Script.is_stack_greater_or_equal("2.3"):
-    Logger.info("Can only link configs for HDP-2.3 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
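
Both setup_stack_symlinks and link_configs above wrap their work in
FcntlBasedProcessLock so that only one agent command at a time touches the
symlinks when parallel execution is enabled. The essence of such a lock is an
exclusive flock on a shared lock file; the context manager below is a simplified
sketch of that idea, not the actual resource_management class.

# Simplified sketch of an fcntl-based process lock like the one used above;
# the real FcntlBasedProcessLock also supports skipping fcntl failures.
import fcntl
from contextlib import contextmanager

@contextmanager
def process_lock(lock_file_path, enabled=True):
    if not enabled:
        yield
        return
    with open(lock_file_path, "a") as lock_file:
        fcntl.flock(lock_file, fcntl.LOCK_EX)   # block until this process owns the lock
        try:
            yield
        finally:
            fcntl.flock(lock_file, fcntl.LOCK_UN)

# Usage: only one process at a time runs the body.
# with process_lock("/tmp/link_configs_lock_file"):
#     ...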

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100755
index 08542c4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ -z "$newUid" ]
-then
-  echo "Failed to find a free UID between 1001 and 2000"
-  exit 1
-fi
-
-set -e
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
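
changeToSecureUid.sh above takes a username and a comma-separated list of
directories, scans /etc/passwd for a free UID between 1001 and 2000, switches the
user to it, and re-owns the listed directories. The same UID search can be
expressed with Python's standard pwd module, as in the illustrative sketch below.

# Illustrative sketch of the free-UID search performed by changeToSecureUid.sh,
# using the pwd database instead of grepping /etc/passwd.
import pwd

def find_available_uid(low=1001, high=2000):
    taken = {entry.pw_uid for entry in pwd.getpwall()}
    for uid in range(low, high + 1):
        if uid not in taken:
            return uid
    raise RuntimeError("no free UID between %d and %d" % (low, high))

print(find_available_uid())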

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
deleted file mode 100755
index c34be0b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
deleted file mode 100755
index 5544085..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.expect import expect
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-restart_type = default("/commandParams/restart_type", "")
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
-  stack_version_formatted = format_stack_version(version)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if the port is root-owned on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
-
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_stack_root = '/usr/hdp/current'
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-  # not supported in HDP 2.2+
-  hadoop_conf_empty_dir = None
-
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = get_port(dfs_dn_addr)
-    dfs_dn_http_port = get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = get_port(dfs_dn_https_addr)
-    # Avoid a situation where the datanode cannot be started as a plain user because it uses root-owned ports
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_falcon_server_hosts:
-  user_to_groups_dict[falcon_user] = [proxyuser_group]
-if has_ranger_admin:
-  user_to_groups_dict[ranger_user] = [ranger_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
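
The block above decides whether the DataNode has to be started on root-owned (privileged) ports by parsing each configured address and branching on dfs.http.policy. Below is a minimal standalone sketch of the same check, outside the resource_management framework; get_port and is_secure_port mirror the helpers above, while needs_privileged_start is an illustrative name, not part of the deleted script.

import re

def get_port(address):
    # Extract the port from an address such as "0.0.0.0:1019" or "https://host:50475".
    if address is None:
        return None
    m = re.search(r'(?:https?://)?([\w\d.]*):(\d{1,5})', address)
    return int(m.group(2)) if m else None

def is_secure_port(port):
    # Ports below 1024 are root-owned (privileged) on *nix systems.
    return port is not None and port < 1024

def needs_privileged_start(http_policy, dn_addr, http_addr, https_addr):
    # Which addresses matter depends on dfs.http.policy, as in the params logic above.
    if http_policy == "HTTPS_ONLY":
        candidates = (dn_addr, https_addr)
    elif http_policy == "HTTP_AND_HTTPS":
        candidates = (dn_addr, http_addr, https_addr)
    else:  # "HTTP_ONLY" or unset
        candidates = (dn_addr, http_addr)
    return any(is_secure_port(get_port(a)) for a in candidates)

# The default secure DataNode port 1019 is privileged, so this prints True.
print(needs_privileged_start("HTTP_ONLY", "0.0.0.0:1019", "0.0.0.0:1022", None))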

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100755
index 1a7d21a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,224 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-  should_create_users_and_groups = not params.host_sys_prepped and not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as override_uid is disabled')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-    if not params.host_sys_prepped and params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is disabled')
-      pass
-
-  if not params.host_sys_prepped:
-    if params.has_namenode:
-      if should_create_users_and_groups:
-        create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      if should_create_users_and_groups:
-        create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma-separated list of directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-        group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Installs the JDK using parameters that come from ambari-server
-  """
-  import params
-
-  java_exec = format("{java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # jdk_name is None when a custom JDK is already installed by the user
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-    )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-      )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
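
create_users_and_groups above splits values such as dfs.cluster.administrators or tez.am.view-acls into a user list and a group list. The parsing step in isolation, as a plain-Python sketch without the User/Group resources (split_users_and_groups is an illustrative name):

import re

def split_users_and_groups(user_and_groups):
    # "<comma-delimited usernames><space><comma-delimited group names>"; either part may be empty.
    parts = re.split(r'\s', user_and_groups)
    if len(parts) == 1:
        parts.append("")
    users = parts[0].split(",") if parts[0] else []
    groups = parts[1].split(",") if parts[1] else []
    return users, groups

print(split_users_and_groups("hdfs,ambari-qa hadoop,users"))
# (['hdfs', 'ambari-qa'], ['hadoop', 'users'])
print(split_users_and_groups("hdfs"))
# (['hdfs'], [])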

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100755
index ce17776..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
deleted file mode 100755
index 6193c11..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
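
The values above come either from direct lookups into the nested command JSON or from default(path, fallback), which returns the fallback when any segment of the slash-separated path is missing. A rough stand-in for that lookup, shown only to illustrate the access pattern; the real helper lives in resource_management and its implementation may differ, and the sample config and host name below are made up.

def default(path, fallback, config=None):
    # Walk a "/a/b/c"-style path through nested dicts; return fallback on any miss.
    node = config or {}
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

sample = {"clusterHostInfo": {"namenode_host": ["nn1.example.com"]}}  # illustrative
print(default("/clusterHostInfo/namenode_host", [], sample))          # ['nn1.example.com']
print(default("/clusterHostInfo/ganglia_server_host", [], sample))    # []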

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100755
index a35dce7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-  _alter_repo("create", params.repo_info, template)
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100755
index 1609050..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)
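
install_packages above appends the stack selector package only when compare_versions(stack_version_formatted, '2.2') >= 0. A minimal dotted-version comparison with the same intent, assuming purely numeric components; the real compare_versions in resource_management handles more cases.

def compare_numeric_versions(a, b):
    # Return -1, 0, or 1, comparing dotted numeric versions component by component.
    pa = [int(x) for x in a.split(".")]
    pb = [int(x) for x in b.split(".")]
    # Pad the shorter list so "2.2" and "2.2.0" compare as equal.
    length = max(len(pa), len(pb))
    pa += [0] * (length - len(pa))
    pb += [0] * (length - len(pb))
    return (pa > pb) - (pa < pb)

print(compare_numeric_versions("2.3.4.0", "2.2"))  # 1
print(compare_numeric_versions("2.1.0", "2.2"))    # -1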

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100755
index 14b9d99..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100755
index 68aa96d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
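
checkForFormat.sh above refuses to format the NameNode when any configured name directory already has content. The same guard, sketched in Python for readability; the directory paths below are illustrative.

import os

def non_empty_dirs(name_dirs):
    # name_dirs: comma-separated list, as passed to the shell script.
    return [d for d in name_dirs.split(",") if os.path.isdir(d) and os.listdir(d)]

dirs = "/hadoop/hdfs/namenode,/data/hdfs/namenode"  # illustrative
blocking = non_empty_dirs(dirs)
if blocking:
    print("Will not format the NameNode; non-empty dirs: %s" % " ".join(blocking))
else:
    print("Safe to format the NameNode")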

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
deleted file mode 100755
index c90890b..0000000
Binary files a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
deleted file mode 100755
index 7e12962..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use the following logger to send the summary to a separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
deleted file mode 100755
index 0f7a55c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
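
topology_script.py above resolves each argument to a rack by trying the hostname, then the bare IP, then a localhost fallback, and finally /default-rack. The lookup chain in isolation; the rack_map below is a made-up example, while the real data comes from topology_mappings.data.

DEFAULT_RACK = "/default-rack"

def extract_ip(container_string):
    # Hadoop may pass "127.0.0.1/127.0.0.1:50010"; keep only the leading host/IP.
    return container_string.split("/")[0].split(":")[0]

def lookup(hostname_or_ip, rack_map):
    rack = rack_map.get(hostname_or_ip)
    if rack is not None:
        return rack
    rack = rack_map.get(extract_ip(hostname_or_ip))
    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)

rack_map = {"dn1.example.com": "/rack01", "10.0.0.11": "/rack01"}  # illustrative
print(lookup("dn1.example.com", rack_map))            # /rack01
print(lookup("10.0.0.11/10.0.0.11:50010", rack_map))  # /rack01
print(lookup("unknown-host", rack_map))               # /default-rack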

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
deleted file mode 100755
index f21e4b1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()


[44/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
deleted file mode 100755
index b0415b1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
+++ /dev/null
@@ -1,777 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31
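
For context on the file removed above: the dump ends by creating the VERSION table and seeding it with a single row, which is what the Hive metastore's schema verification reads to confirm that the database matches the release it is running against (here 0.12.0). A minimal sketch, not part of the patch, of inspecting that row by hand; the database name hive below is an assumption, substitute whatever schema the metastore was created in:

  -- Sketch only: check the recorded metastore schema version by hand.
  -- The database name hive is assumed, not something this patch defines.
  USE hive;
  SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;
  -- For the file above this should return: 1, '0.12.0', 'Hive release version 0.12.0'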

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
deleted file mode 100755
index 812b897..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
+++ /dev/null
@@ -1,718 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
-    TYPE_NAME VARCHAR2(4000) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(128) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL, 
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
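A note on the Oracle variant removed above: unlike the MySQL script, it declares every foreign key INITIALLY DEFERRED, so referential checks are postponed until COMMIT rather than enforced per statement, and rows may be inserted in any order within one transaction. A minimal sketch, not part of the patch, with made-up IDs and assuming the tables above exist and the constraints behave as declared:

  -- Sketch only: deferred foreign keys let the child row go in first.
  INSERT INTO TBLS (TBL_ID, CREATE_TIME, LAST_ACCESS_TIME, RETENTION, SD_ID, DB_ID)
    VALUES (100, 0, 0, 0, 200, 300);      -- SD_ID 200 / DB_ID 300 do not exist yet
  INSERT INTO SDS (SD_ID, IS_COMPRESSED, NUM_BUCKETS, IS_STOREDASSUBDIRECTORIES)
    VALUES (200, 0, -1, 0);               -- now satisfies TBLS_FK1
  INSERT INTO DBS (DB_ID, DB_LOCATION_URI)
    VALUES (300, 'hdfs:///tmp/example');  -- now satisfies TBLS_FK2
  COMMIT;                                 -- deferred constraints are validated here

The MySQL dump that follows achieves a similar effect differently, by setting FOREIGN_KEY_CHECKS=0 while the schema is loaded.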


[16/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
new file mode 100755
index 0000000..b0415b1
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
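
A minimal post-load sanity check (a sketch, not part of the schema file itself): assuming the dump above was sourced into a database named `metastore` (that name is an assumption; the script does not create the database), the seeded VERSION row and the core table definitions can be verified with:

  USE metastore;
  SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;  -- expect '0.12.0', 'Hive release version 0.12.0'
  SHOW TABLES LIKE 'TBLS';                              -- confirms the core metastore tables were created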

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
new file mode 100755
index 0000000..812b897
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,718 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
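
Unlike the MySQL variant above, this Oracle script declares its foreign keys INITIALLY DEFERRED, so referential checks are postponed to commit time. As a rough sketch (assuming the script was run as the metastore schema owner), the deferred constraints it creates can be listed from the Oracle data dictionary with:

  SELECT TABLE_NAME, CONSTRAINT_NAME, DEFERRED
    FROM USER_CONSTRAINTS
   WHERE CONSTRAINT_TYPE = 'R'
   ORDER BY TABLE_NAME;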


[33/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
deleted file mode 100755
index 568e46e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
+++ /dev/null
@@ -1,1947 +0,0 @@
-#!/usr/bin/env ambari-python-wrap
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import re
-import os
-import sys
-import socket
-
-from math import ceil, floor
-
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs
-
-from stack_advisor import DefaultStackAdvisor
-
-
-class ODPi10StackAdvisor(DefaultStackAdvisor):
-
-  def __init__(self):
-    super(ODPi10StackAdvisor, self).__init__()
-    Logger.initialize_logger()
-
-  def getComponentLayoutValidations(self, services, hosts):
-    """Returns array of Validation objects about issues with hostnames components assigned to"""
-    items = super(ODPi10StackAdvisor, self).getComponentLayoutValidations(services, hosts)
-
-    # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
-    # Use a set for fast lookup
-    hostsSet =  set(super(ODPi10StackAdvisor, self).getActiveHosts([host["Hosts"] for host in hosts["items"]]))  #[host["Hosts"]["host_name"] for host in hosts["items"]]
-    hostsCount = len(hostsSet)
-
-    componentsListList = [service["components"] for service in services["services"]]
-    componentsList = [item for sublist in componentsListList for item in sublist]
-    nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
-    secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
-
-    # Validating cardinality
-    for component in componentsList:
-      if component["StackServiceComponents"]["cardinality"] is not None:
-         componentName = component["StackServiceComponents"]["component_name"]
-         componentDisplayName = component["StackServiceComponents"]["display_name"]
-         componentHosts = []
-         if component["StackServiceComponents"]["hostnames"] is not None:
-           componentHosts = [componentHost for componentHost in component["StackServiceComponents"]["hostnames"] if componentHost in hostsSet]
-         componentHostsCount = len(componentHosts)
-         cardinality = str(component["StackServiceComponents"]["cardinality"])
-         # cardinality types: null, 1+, 1-2, 1, ALL
-         message = None
-         if "+" in cardinality:
-           hostsMin = int(cardinality[:-1])
-           if componentHostsCount < hostsMin:
-             message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
-         elif "-" in cardinality:
-           nums = cardinality.split("-")
-           hostsMin = int(nums[0])
-           hostsMax = int(nums[1])
-           if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
-             message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
-         elif "ALL" == cardinality:
-           if componentHostsCount != hostsCount:
-             message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
-         else:
-           if componentHostsCount != int(cardinality):
-             message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
-
-         if message is not None:
-           items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
-
-    # Validating host-usage
-    usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
-    usedHostsList = [item for sublist in usedHostsListList for item in sublist]
-    nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]
-    for host in nonUsedHostsList:
-      items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
-
-    return items
-
-  def getServiceConfigurationRecommenderDict(self):
-    return {
-      "YARN": self.recommendYARNConfigurations,
-      "MAPREDUCE2": self.recommendMapReduce2Configurations,
-      "HDFS": self.recommendHDFSConfigurations,
-      "HBASE": self.recommendHbaseConfigurations,
-      "STORM": self.recommendStormConfigurations,
-      "AMBARI_METRICS": self.recommendAmsConfigurations,
-      "RANGER": self.recommendRangerConfigurations
-    }
-
-  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
-    putYarnProperty = self.putProperty(configurations, "yarn-site", services)
-    putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
-    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
-    nodemanagerMinRam = 1048576 # 1TB in mb
-    if "referenceNodeManagerHost" in clusterData:
-      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
-    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
-    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
-    putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
-    putYarnEnvProperty('min_user_id', self.get_system_min_uid())
-
-    sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
-    if sc_queue_name is not None:
-      putYarnEnvProperty("service_check.queue.name", sc_queue_name)
-
-    containerExecutorGroup = 'hadoop'
-    if 'cluster-env' in services['configurations'] and 'user_group' in services['configurations']['cluster-env']['properties']:
-      containerExecutorGroup = services['configurations']['cluster-env']['properties']['user_group']
-    putYarnProperty("yarn.nodemanager.linux-container-executor.group", containerExecutorGroup)
-
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "TEZ" in servicesList:
-        ambari_user = self.getAmbariUser(services)
-        ambariHostName = socket.getfqdn()
-        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
-        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(ambari_user), "*")
-        old_ambari_user = self.getOldAmbariUser(services)
-        if old_ambari_user is not None:
-            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
-            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
-
-
-  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
-    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
-    putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
-    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
-    putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
-    putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
-    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
-    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
-    putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
-    mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
-    if mr_queue is not None:
-      putMapredProperty("mapreduce.job.queuename", mr_queue)
-
-  def getAmbariUser(self, services):
-    ambari_user = services['ambari-server-properties']['ambari-server.user']
-    if "cluster-env" in services["configurations"] \
-          and "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"] \
-                and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
-                    and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
-      ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
-      ambari_user = ambari_user.split('@')[0]
-    return ambari_user
-
-  def getOldAmbariUser(self, services):
-    ambari_user = None
-    if "cluster-env" in services["configurations"]:
-      if "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
-              and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
-         ambari_user = services['ambari-server-properties']['ambari-server.user']
-      elif "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"]:
-         ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
-         ambari_user = ambari_user.split('@')[0]
-    return ambari_user
-
-  def recommendAmbariProxyUsersForHDFS(self, services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute):
-      if "HDFS" in servicesList:
-          ambari_user = self.getAmbariUser(services)
-          ambariHostName = socket.getfqdn()
-          putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
-          putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(ambari_user), "*")
-          old_ambari_user = self.getOldAmbariUser(services)
-          if old_ambari_user is not None:
-            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
-            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
-
-  def recommendHadoopProxyUsers (self, configurations, services, hosts):
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    users = {}
-
-    if 'forced-configurations' not in services:
-      services["forced-configurations"] = []
-
-    if "HDFS" in servicesList:
-      hdfs_user = None
-      if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
-        hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
-        if not hdfs_user in users and hdfs_user is not None:
-          users[hdfs_user] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "hadoop-env", "propertyName" : "hdfs_user"}
-
-    if "OOZIE" in servicesList:
-      oozie_user = None
-      if "oozie-env" in services["configurations"] and "oozie_user" in services["configurations"]["oozie-env"]["properties"]:
-        oozie_user = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
-        oozieServerHosts = self.getHostsWithComponent("OOZIE", "OOZIE_SERVER", services, hosts)
-        if oozieServerHosts is not None:
-          oozieServerHostsNameList = []
-          for oozieServerHost in oozieServerHosts:
-            oozieServerHostsNameList.append(oozieServerHost["Hosts"]["host_name"])
-          oozieServerHostsNames = ",".join(oozieServerHostsNameList)
-          if not oozie_user in users and oozie_user is not None:
-            users[oozie_user] = {"propertyHosts" : oozieServerHostsNames,"propertyGroups" : "*", "config" : "oozie-env", "propertyName" : "oozie_user"}
-
-    hive_user = None
-    if "HIVE" in servicesList:
-      webhcat_user = None
-      if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"] \
-              and "webhcat_user" in services["configurations"]["hive-env"]["properties"]:
-        hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
-        webhcat_user = services["configurations"]["hive-env"]["properties"]["webhcat_user"]
-        hiveServerHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER", services, hosts)
-        hiveServerInteractiveHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER_INTERACTIVE", services, hosts)
-        webHcatServerHosts = self.getHostsWithComponent("HIVE", "WEBHCAT_SERVER", services, hosts)
-
-        if hiveServerHosts is not None:
-          hiveServerHostsNameList = []
-          for hiveServerHost in hiveServerHosts:
-            hiveServerHostsNameList.append(hiveServerHost["Hosts"]["host_name"])
-          # Append Hive Server Interactive host as well, as it is Hive2/HiveServer2 component.
-          if hiveServerInteractiveHosts:
-            for hiveServerInteractiveHost in hiveServerInteractiveHosts:
-              hiveServerInteractiveHostName = hiveServerInteractiveHost["Hosts"]["host_name"]
-              if hiveServerInteractiveHostName not in hiveServerHostsNameList:
-                hiveServerHostsNameList.append(hiveServerInteractiveHostName)
-                Logger.info("Appended (if not existing) Hive Server Interactive Host : '{0}' to Hive Server Host List : '{1}'".format(hiveServerInteractiveHostName, hiveServerHostsNameList))
-
-          hiveServerHostsNames = ",".join(hiveServerHostsNameList)  # includes Hive Server interactive host also.
-          Logger.info("Hive Server and Hive Server Interactive (if enabled) Host List : {0}".format(hiveServerHostsNameList))
-          if not hive_user in users and hive_user is not None:
-            users[hive_user] = {"propertyHosts" : hiveServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "hive_user"}
-
-        if webHcatServerHosts is not None:
-          webHcatServerHostsNameList = []
-          for webHcatServerHost in webHcatServerHosts:
-            webHcatServerHostsNameList.append(webHcatServerHost["Hosts"]["host_name"])
-          webHcatServerHostsNames = ",".join(webHcatServerHostsNameList)
-          if not webhcat_user in users and webhcat_user is not None:
-            users[webhcat_user] = {"propertyHosts" : webHcatServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "webhcat_user"}
-
-    if "YARN" in servicesList:
-      yarn_user = None
-      if "yarn-env" in services["configurations"] and "yarn_user" in services["configurations"]["yarn-env"]["properties"]:
-        yarn_user = services["configurations"]["yarn-env"]["properties"]["yarn_user"]
-        rmHosts = self.getHostsWithComponent("YARN", "RESOURCEMANAGER", services, hosts)
-
-        if len(rmHosts) > 1:
-          rmHostsNameList = []
-          for rmHost in rmHosts:
-            rmHostsNameList.append(rmHost["Hosts"]["host_name"])
-          rmHostsNames = ",".join(rmHostsNameList)
-          if not yarn_user in users and yarn_user is not None:
-            users[yarn_user] = {"propertyHosts" : rmHostsNames, "config" : "yarn-env", "propertyName" : "yarn_user"}
-
-
-    if "FALCON" in servicesList:
-      falconUser = None
-      if "falcon-env" in services["configurations"] and "falcon_user" in services["configurations"]["falcon-env"]["properties"]:
-        falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
-        if not falconUser in users and falconUser is not None:
-          users[falconUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "falcon-env", "propertyName" : "falcon_user"}
-
-    if "SPARK" in servicesList:
-      livyUser = None
-      if "livy-env" in services["configurations"] and "livy_user" in services["configurations"]["livy-env"]["properties"]:
-        livyUser = services["configurations"]["livy-env"]["properties"]["livy_user"]
-        if not livyUser in users and livyUser is not None:
-          users[livyUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "livy-env", "propertyName" : "livy_user"}
-
-    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
-    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
-
-    for user_name, user_properties in users.iteritems():
-      if hive_user and hive_user == user_name:
-        if "propertyHosts" in user_properties:
-          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(hive_user)})
-      # Add properties "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" to core-site for all users
-      putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(user_name) , user_properties["propertyHosts"])
-      Logger.info("Updated hadoop.proxyuser.{0}.hosts as : {1}".format(hive_user, user_properties["propertyHosts"]))
-      if "propertyGroups" in user_properties:
-        putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(user_name) , user_properties["propertyGroups"])
-
-      # Remove old properties if user was renamed
-      userOldValue = getOldValue(self, services, user_properties["config"], user_properties["propertyName"])
-      if userOldValue is not None and userOldValue != user_name:
-        putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true')
-        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)})
-        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(user_name)})
-
-        if "propertyGroups" in user_properties:
-          putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true')
-          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(userOldValue)})
-          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(user_name)})
-
-    self.recommendAmbariProxyUsersForHDFS(services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute)
-
-  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
-    putHDFSSiteProperty = self.putProperty(configurations, "hdfs-site", services)
-    putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, "hdfs-site")
-    putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
-    putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
-    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
-    putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
-
-    # Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address
-    hdfsSiteProperties = getServicesSiteProperties(services, "hdfs-site")
-    nameServices = None
-    if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:
-      nameServices = hdfsSiteProperties['dfs.internal.nameservices']
-    if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
-      nameServices = hdfsSiteProperties['dfs.nameservices']
-    if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties:
-      namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices]
-      if len(namenodes.split(',')) > 1:
-        putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
-
-    #Initialize default 'dfs.datanode.data.dir' if needed
-    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
-      dataDirs = '/hadoop/hdfs/data'
-      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
-    else:
-      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
-
-    # dfs.datanode.du.reserved should be set to 10-15% of the volume size.
-    # For each host, take the size of its largest volume; then take the minimum of those values across all hosts.
-    # This ensures that every host will have at least one data dir with available space.
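-    # Example: if the smallest "largest volume" across hosts is 500 GB, the code below
-    # recommends 500 GB / 8 = 62.5 GB for dfs.datanode.du.reserved (with a 1 GB floor).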
-    reservedSizeRecommendation = 0l #kBytes
-    for host in hosts["items"]:
-      mountPoints = []
-      mountPointDiskAvailableSpace = [] #kBytes
-      for diskInfo in host["Hosts"]["disk_info"]:
-        mountPoints.append(diskInfo["mountpoint"])
-        mountPointDiskAvailableSpace.append(long(diskInfo["size"]))
-
-      maxFreeVolumeSizeForHost = 0l #kBytes
-      for dataDir in dataDirs:
-        mp = getMountPointForDir(dataDir, mountPoints)
-        for i in range(len(mountPoints)):
-          if mp == mountPoints[i]:
-            if mountPointDiskAvailableSpace[i] > maxFreeVolumeSizeForHost:
-              maxFreeVolumeSizeForHost = mountPointDiskAvailableSpace[i]
-
-      if not reservedSizeRecommendation or maxFreeVolumeSizeForHost and maxFreeVolumeSizeForHost < reservedSizeRecommendation:
-        reservedSizeRecommendation = maxFreeVolumeSizeForHost
-
-    if reservedSizeRecommendation:
-      reservedSizeRecommendation = max(reservedSizeRecommendation * 1024 / 8, 1073741824) # At least 1Gb is reserved
-      putHDFSSiteProperty('dfs.datanode.du.reserved', reservedSizeRecommendation) #Bytes
-
-    # recommendations for "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" properties in core-site
-    self.recommendHadoopProxyUsers(configurations, services, hosts)
-
-  def recommendHbaseConfigurations(self, configurations, clusterData, services, hosts):
-    # recommendations for HBase env config
-
-    # If cluster size is < 20 hosts (small test clusters), hbase master heap = 1G
-    # else if cluster size is < 100, hbase master heap = 2G
-    # else if cluster size is < 500, hbase master heap = 4G
-    # else hbase master heap = 8G
-    hostsCount = 0
-    if hosts and "items" in hosts:
-      hostsCount = len(hosts["items"])
-
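-    # Exactly one of the range conditions below evaluates to True, so indexing the
-    # dict with [True] selects the matching master heap size in GB.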
-    hbaseMasterRam = {
-      hostsCount < 20: 1,
-      20 <= hostsCount < 100: 2,
-      100 <= hostsCount < 500: 4,
-      500 <= hostsCount: 8
-    }[True]
-
-    putHbaseProperty = self.putProperty(configurations, "hbase-env", services)
-    putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)
-    putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)
-
-    # recommendations for HBase site config
-    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
-
-    if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \
-      and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \
-      and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
-      putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
-
-
-  def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
-
-    putRangerAdminProperty = self.putProperty(configurations, "admin-properties", services)
-
-    # Build policymgr_external_url
-    protocol = 'http'
-    ranger_admin_host = 'localhost'
-    port = '6080'
-
-    # Check if http is disabled. For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled
-    # For Ranger-0.4.0 this can be checked in ranger-site/http.enabled
-    if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \
-      and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \
-      ('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \
-      and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):
-      # HTTPS protocol is used
-      protocol = 'https'
-      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.https.port
-      if 'ranger-admin-site' in services['configurations'] and \
-          'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:
-        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']
-      # In Ranger-0.4.0 port stored in ranger-site https.service.port
-      elif 'ranger-site' in services['configurations'] and \
-          'https.service.port' in services['configurations']['ranger-site']['properties']:
-        port = services['configurations']['ranger-site']['properties']['https.service.port']
-    else:
-      # HTTP protocol is used
-      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.http.port
-      if 'ranger-admin-site' in services['configurations'] and \
-          'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:
-        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']
-      # In Ranger-0.4.0 port stored in ranger-site http.service.port
-      elif 'ranger-site' in services['configurations'] and \
-          'http.service.port' in services['configurations']['ranger-site']['properties']:
-        port = services['configurations']['ranger-site']['properties']['http.service.port']
-
-    ranger_admin_hosts = self.getComponentHostNames(services, "RANGER", "RANGER_ADMIN")
-    if ranger_admin_hosts:
-      if len(ranger_admin_hosts) > 1 \
-        and services['configurations'] \
-        and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \
-        and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \
-        and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():
-
-        # in case of HA deployment keep the policymgr_external_url specified in the config
-        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
-      else:
-
-        ranger_admin_host = ranger_admin_hosts[0]
-        policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
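-        # e.g. http://<ranger-admin-host>:6080 with the defaults above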
-
-      putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
-
-    rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
-    if rangerServiceVersion == '0.4.0':
-      # Recommend ldap settings based on ambari.properties configuration
-      # If 'ambari.ldap.isConfigured' == true
-      # For Ranger version 0.4.0
-      if 'ambari-server-properties' in services and \
-      'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
-        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
-        putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
-        serverProperties = services['ambari-server-properties']
-        if 'authentication.ldap.managerDn' in serverProperties:
-          putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
-        if 'authentication.ldap.primaryUrl' in serverProperties:
-          ldap_protocol =  'ldap://'
-          if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
-            ldap_protocol =  'ldaps://'
-          ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
-          putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)
-        if 'authentication.ldap.userObjectClass' in serverProperties:
-          putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
-        if 'authentication.ldap.usernameAttribute' in serverProperties:
-          putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
-
-
-      # Set Ranger Admin Authentication method
-      if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \
-          'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:
-        rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']
-        authenticationMethod = rangerUserSyncSource.upper()
-        if authenticationMethod != 'FILE':
-          putRangerAdminProperty('authentication_method', authenticationMethod)
-
-      # Recommend xasecure.audit.destination.hdfs.dir
-      # For Ranger version 0.4.0
-      servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-      putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
-      include_hdfs = "HDFS" in servicesList
-      if include_hdfs:
-        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
-          default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
-          default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'
-          putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)
-
-      # Recommend Ranger Audit properties for ranger supported services
-      # For Ranger version 0.4.0
-      ranger_services = [
-        {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},
-        {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},
-        {'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},
-        {'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},
-        {'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}
-      ]
-
-      for item in range(len(ranger_services)):
-        if ranger_services[item]['service_name'] in servicesList:
-          component_audit_file =  ranger_services[item]['audit_file']
-          if component_audit_file in services["configurations"]:
-            ranger_audit_dict = [
-              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},
-              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},
-              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}
-            ]
-            putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
-
-            for item in ranger_audit_dict:
-              if item['filename'] in services["configurations"] and item['configname'] in  services["configurations"][item['filename']]["properties"]:
-                if item['filename'] in configurations and item['configname'] in  configurations[item['filename']]["properties"]:
-                  rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
-                else:
-                  rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
-                putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
-
-
-  def getAmsMemoryRecommendation(self, services, hosts):
-    # MB per sink in hbase heapsize
-    HEAP_PER_MASTER_COMPONENT = 50
-    HEAP_PER_SLAVE_COMPONENT = 10
-
-    schMemoryMap = {
-      "HDFS": {
-        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
-        "DATANODE": HEAP_PER_SLAVE_COMPONENT
-      },
-      "YARN": {
-        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
-      },
-      "HBASE": {
-        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "ACCUMULO": {
-        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "KAFKA": {
-        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
-      },
-      "FLUME": {
-        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "STORM": {
-        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
-      },
-      "AMBARI_METRICS": {
-        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
-        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
-      }
-    }
-    total_sinks_count = 0
-    # minimum heap size
-    hbase_heapsize = 500
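-    # Each monitored component adds heap that scales sub-linearly (exponent 0.9)
-    # with the number of sinks, on top of the 500 MB baseline.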
-    for serviceName, componentsDict in schMemoryMap.items():
-      for componentName, multiplier in componentsDict.items():
-        schCount = len(
-          self.getHostsWithComponent(serviceName, componentName, services,
-                                     hosts))
-        hbase_heapsize += int((schCount * multiplier) ** 0.9)
-        total_sinks_count += schCount
-    collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)
-
-    return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
-
-  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
-    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    # Storm AMS integration
-    if 'AMBARI_METRICS' in servicesList:
-      putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
-
-  def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
-    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
-    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
-    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
-    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
-    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
-    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-
-    if 'cluster-env' in services['configurations'] and \
-        'metrics_collector_vip_host' in services['configurations']['cluster-env']['properties']:
-      metric_collector_host = services['configurations']['cluster-env']['properties']['metrics_collector_vip_host']
-    else:
-      metric_collector_host = 'localhost' if len(amsCollectorHosts) == 0 else amsCollectorHosts[0]
-
-    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(metric_collector_host) + ":6188")
-
-    log_dir = "/var/log/ambari-metrics-collector"
-    if "ams-env" in services["configurations"]:
-      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
-        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
-      putHbaseEnvProperty("hbase_log_dir", log_dir)
-
-    defaultFs = 'file:///'
-    if "core-site" in services["configurations"] and \
-      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
-      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
-
-    operatingMode = "embedded"
-    if "ams-site" in services["configurations"]:
-      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
-        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
-
-    if operatingMode == "distributed":
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
-    else:
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
-
-    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
-    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
-    zk_port_default = []
-    if "ams-hbase-site" in services["configurations"]:
-      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
-      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
-      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
-        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
-
-    # Skip the recommendation if the {{zookeeper_clientPort}} template default is still present
-    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      zkPort = self.getZKPort(services)
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
-    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
-
-    mountpoints = ["/"]
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          mountpoints = self.getPreferredMountPoints(host["Hosts"])
-          break
-    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
-    if isLocalRootDir:
-      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
-      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
-    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
-    if len(mountpoints) > 1 and isLocalRootDir:
-      tmpDir = os.path.join(mountpoints[1], tmpDir)
-    else:
-      tmpDir = os.path.join(mountpoints[0], tmpDir)
-    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
-
-    if operatingMode == "distributed":
-      putAmsHbaseSiteProperty("hbase.rootdir", defaultFs + "/user/ams/hbase")
-
-    if operatingMode == "embedded":
-      if isLocalRootDir:
-        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
-      else:
-        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-
-    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
-
-    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
-    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
-    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
-
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
-      if total_sinks_count >= 2000:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
-        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
-        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
-      elif total_sinks_count >= 500:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
-      else:
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
-      pass
-
-    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
-    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
-
-    # Distributed mode heap size
-    if operatingMode == "distributed":
-      hbase_heapsize = max(hbase_heapsize, 768)
-      putHbaseEnvProperty("hbase_master_heapsize", "512")
-      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15*hbase_heapsize,64))
-    else:
-      # Embedded mode heap size : master + regionserver
-      hbase_rs_heapsize = 768
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
-      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))
-
-    # If no local DN in distributed mode
-    if operatingMode == "distributed":
-      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-      # call by Kerberos wizard sends only the service being affected
-      # so it is possible for dn_hosts to be None but not amsCollectorHosts
-      if dn_hosts and len(dn_hosts) > 0:
-        if set(amsCollectorHosts).intersection(dn_hosts):
-          collector_cohosted_with_dn = "true"
-        else:
-          collector_cohosted_with_dn = "false"
-        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
-
-    #split points
-    scriptDir = os.path.dirname(os.path.abspath(__file__))
-    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
-    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
-    sys.path.append(os.path.join(metricsDir, 'scripts'))
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
-    from split_points import FindSplitPointsForAMSRegions
-
-    ams_hbase_site = None
-    ams_hbase_env = None
-
-    # Overridden properties from the UI
-    if "ams-hbase-site" in services["configurations"]:
-      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
-    if "ams-hbase-env" in services["configurations"]:
-       ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
-
-    # Recommendations
-    if not ams_hbase_site:
-      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
-    if not ams_hbase_env:
-      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
-
-    split_point_finder = FindSplitPointsForAMSRegions(
-      ams_hbase_site, ams_hbase_env, serviceMetricsDir, operatingMode, servicesList)
-
-    result = split_point_finder.get_split_points()
-    precision_splits = ' '
-    aggregate_splits = ' '
-    if result.precision:
-      precision_splits = result.precision
-    if result.aggregate:
-      aggregate_splits = result.aggregate
-    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
-    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
-
-    component_grafana_exists = False
-    for service in services['services']:
-      if 'components' in service:
-        for component in service['components']:
-          if 'StackServiceComponents' in component:
-            # If Grafana is installed the hostnames would indicate its location
-            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
-              len(component['StackServiceComponents']['hostnames']) != 0:
-              component_grafana_exists = True
-              break
-    pass
-
-    if not component_grafana_exists:
-      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
-
-    pass
-
-  def getHostNamesWithComponent(self, serviceName, componentName, services):
-    """
-    Returns the list of hostnames on which service component is installed
-    """
-    if services is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
-      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
-      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
-      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
-        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
-        return componentHostnames
-    return []
-
-  def getHostsWithComponent(self, serviceName, componentName, services, hosts):
-    if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
-      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
-      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
-      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
-        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
-        componentHosts = [host for host in hosts["items"] if host["Hosts"]["host_name"] in componentHostnames]
-        return componentHosts
-    return []
-
-  def getHostWithComponent(self, serviceName, componentName, services, hosts):
-    componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)
-    if (len(componentHosts) > 0):
-      return componentHosts[0]
-    return None
-
-  def getHostComponentsByCategories(self, hostname, categories, services, hosts):
-    components = []
-    if services is not None and hosts is not None:
-      for service in services["services"]:
-          components.extend([componentEntry for componentEntry in service["components"]
-                              if componentEntry["StackServiceComponents"]["component_category"] in categories
-                              and hostname in componentEntry["StackServiceComponents"]["hostnames"]])
-    return components
-
-  def getZKHostPortString(self, services, include_port=True):
-    """
-    Returns a comma-delimited string of the ZooKeeper server hosts installed in the cluster,
-    with the configured client port appended when include_port is True.
-    Example: zk.host1.org:2181,zk.host2.org:2181,zk.host3.org:2181
-    """
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    include_zookeeper = "ZOOKEEPER" in servicesList
-    zookeeper_host_port = ''
-
-    if include_zookeeper:
-      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
-      zookeeper_host_port_arr = []
-
-      if include_port:
-        zookeeper_port = self.getZKPort(services)
-        for i in range(len(zookeeper_hosts)):
-          zookeeper_host_port_arr.append(zookeeper_hosts[i] + ':' + zookeeper_port)
-      else:
-        for i in range(len(zookeeper_hosts)):
-          zookeeper_host_port_arr.append(zookeeper_hosts[i])
-
-      zookeeper_host_port = ",".join(zookeeper_host_port_arr)
-    return zookeeper_host_port
-
-  def getZKPort(self, services):
-    zookeeper_port = '2181'     #default port
-    if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):
-      zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']
-    return zookeeper_port
-
-  def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
-
-    hBaseInstalled = False
-    if 'HBASE' in servicesList:
-      hBaseInstalled = True
-
-    cluster = {
-      "cpu": 0,
-      "disk": 0,
-      "ram": 0,
-      "hBaseInstalled": hBaseInstalled,
-      "components": components
-    }
-
-    if len(hosts["items"]) > 0:
-      nodeManagerHosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
-      # The NodeManager host with the least memory is used as the reference, since values sized for it will also fit on larger hosts.
-      if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:
-        nodeManagerHost = nodeManagerHosts[0];
-        for nmHost in nodeManagerHosts:
-          if nmHost["Hosts"]["total_mem"] < nodeManagerHost["Hosts"]["total_mem"]:
-            nodeManagerHost = nmHost
-        host = nodeManagerHost["Hosts"]
-        cluster["referenceNodeManagerHost"] = host
-      else:
-        host = hosts["items"][0]["Hosts"]
-      cluster["referenceHost"] = host
-      cluster["cpu"] = host["cpu_count"]
-      cluster["disk"] = len(host["disk_info"])
-      cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
-
-    ramRecommendations = [
-      {"os":1, "hbase":1},
-      {"os":2, "hbase":1},
-      {"os":2, "hbase":2},
-      {"os":4, "hbase":4},
-      {"os":6, "hbase":8},
-      {"os":8, "hbase":8},
-      {"os":8, "hbase":8},
-      {"os":12, "hbase":16},
-      {"os":24, "hbase":24},
-      {"os":32, "hbase":32},
-      {"os":64, "hbase":32}
-    ]
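-    # Exactly one RAM range key below is True (== 1), so indexing with [1]
-    # selects the matching row of ramRecommendations.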
-    index = {
-      cluster["ram"] <= 4: 0,
-      4 < cluster["ram"] <= 8: 1,
-      8 < cluster["ram"] <= 16: 2,
-      16 < cluster["ram"] <= 24: 3,
-      24 < cluster["ram"] <= 48: 4,
-      48 < cluster["ram"] <= 64: 5,
-      64 < cluster["ram"] <= 72: 6,
-      72 < cluster["ram"] <= 96: 7,
-      96 < cluster["ram"] <= 128: 8,
-      128 < cluster["ram"] <= 256: 9,
-      256 < cluster["ram"]: 10
-    }[1]
-
-
-    cluster["reservedRam"] = ramRecommendations[index]["os"]
-    cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
-
-
-    cluster["minContainerSize"] = {
-      cluster["ram"] <= 4: 256,
-      4 < cluster["ram"] <= 8: 512,
-      8 < cluster["ram"] <= 24: 1024,
-      24 < cluster["ram"]: 2048
-    }[1]
-
-    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
-    if cluster["hBaseInstalled"]:
-      totalAvailableRam -= cluster["hbaseRam"]
-    cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
-    '''containers = max(3, min(2*CORES, min(1.8*DISKS, (Total available RAM) / MIN_CONTAINER_SIZE)))'''
-    cluster["containers"] = round(max(3,
-                                min(2 * cluster["cpu"],
-                                    min(ceil(1.8 * cluster["disk"]),
-                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
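-    # e.g. 16 cores, 8 disks, 40960 MB available, 2048 MB min container size:
-    #      max(3, min(32, min(ceil(14.4), 20))) = 15 containers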
-
-    '''ramPerContainer = max(512 MB, RAM - reservedRam - hBaseRam) / containers'''
-    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
-    '''If greater than 1GB, value will be in multiples of 512.'''
-    if cluster["ramPerContainer"] > 1024:
-      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
-
-    cluster["mapMemory"] = int(cluster["ramPerContainer"])
-    cluster["reduceMemory"] = cluster["ramPerContainer"]
-    cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
-
-    return cluster
-
-  def getServiceConfigurationValidators(self):
-    return {
-      "HDFS": { "hdfs-site": self.validateHDFSConfigurations,
-                "hadoop-env": self.validateHDFSConfigurationsEnv},
-      "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
-      "YARN": {"yarn-site": self.validateYARNConfigurations,
-               "yarn-env": self.validateYARNEnvConfigurations},
-      "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
-      "STORM": {"storm-site": self.validateStormConfigurations},
-      "AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
-              "ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
-              "ams-site": self.validateAmsSiteConfigurations}
-    }
-
-  def validateMinMax(self, items, recommendedDefaults, configurations):
-
-    # required for casting to the proper numeric type before comparison
-    def convertToNumber(number):
-      try:
-        return int(number)
-      except ValueError:
-        return float(number)
-
-    for configName in configurations:
-      validationItems = []
-      if configName in recommendedDefaults and "property_attributes" in recommendedDefaults[configName]:
-        for propertyName in recommendedDefaults[configName]["property_attributes"]:
-          if propertyName in configurations[configName]["properties"]:
-            if "maximum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
-                propertyName in recommendedDefaults[configName]["properties"]:
-              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
-              maxValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["maximum"])
-              if userValue > maxValue:
-                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is greater than the recommended maximum of {0} ".format(maxValue))}])
-            if "minimum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
-                    propertyName in recommendedDefaults[configName]["properties"]:
-              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
-              minValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["minimum"])
-              if userValue < minValue:
-                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is less than the recommended minimum of {0} ".format(minValue))}])
-      items.extend(self.toConfigurationValidationProblems(validationItems, configName))
-    pass
-
-  def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
-
-    op_mode = properties.get("timeline.metrics.service.operation.mode")
-    correct_op_mode_item = None
-    if op_mode not in ("embedded", "distributed"):
-      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
-      pass
-
-    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
-    return self.toConfigurationValidationProblems(validationItems, "ams-site")
-
-  def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-    ams_site = getSiteProperties(configurations, "ams-site")
-    core_site = getSiteProperties(configurations, "core-site")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-    recommendedDiskSpace = 10485760
-    # TODO validate configuration for multiple AMBARI_METRICS collectors
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      if total_sinks_count > 2000:
-        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
-      elif total_sinks_count > 500:
-        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
-      elif total_sinks_count > 250:
-        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
-
-    validationItems = []
-
-    rootdir_item = None
-    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
-    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
-    hbase_rootdir = properties.get("hbase.rootdir")
-    hbase_tmpdir = properties.get("hbase.tmp.dir")
-    distributed = properties.get("hbase.cluster.distributed")
-    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
-
-    if op_mode == "distributed" and is_local_root_dir:
-      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
-    elif op_mode == "embedded":
-      if distributed.lower() == "false" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://"):
-        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir should not be schemeless or point to HDFS; "
-                                        "use a file:// URI for the local filesystem.")
-      pass
-
-    distributed_item = None
-    if op_mode == "distributed" and not distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
-                                           "distributed mode")
-    if op_mode == "embedded" and distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
-
-    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
-    zkPort = self.getZKPort(services)
-    hbase_zk_client_port_item = None
-    if distributed.lower() == "true" and op_mode == "distributed" and \
-        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
-                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
-
-    if distributed.lower() == "false" and op_mode == "embedded" and \
-        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
-                                                    "should be a different port than the cluster zookeeper port "
-                                                    "(default: 61181).")
-
-    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
-                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
-                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
-
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          if op_mode == 'embedded' or is_local_root_dir:
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
-            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
-
-          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-          if is_local_root_dir:
-            mountPoints = []
-            for mountPoint in host["Hosts"]["disk_info"]:
-              mountPoints.append(mountPoint["mountpoint"])
-            hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)
-            hbase_tmpdir_mountpoint = getMountPointForDir(hbase_tmpdir, mountPoints)
-            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
-            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
-            # if multiple preferred_mountpoints exist
-            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
-              len(preferred_mountpoints) > 1:
-              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
-                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
-              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
-
-            # if METRICS_COLLECTOR is co-hosted with DATANODE
-            # cross-check dfs.datanode.data.dir and hbase.rootdir
-            # they shouldn't share same disk partition IO
-            hdfs_site = getSiteProperties(configurations, "hdfs-site")
-            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
-            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
-              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
-              for dfs_datadir in dfs_datadirs:
-                dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)
-                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
-                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
-                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
-                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
-                  break
-          # If no local DN in distributed mode
-          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
-            item = self.getWarnItem("It is recommended to install the DataNode component on {0} "
-                                    "to speed up IO operations between HDFS and the Metrics "
-                                    "Collector in distributed mode.".format(collectorHostName))
-            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
-          # Short-circuit read should be enabled in distributed mode
-          # if local DN installed
-          else:
-            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
-
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
-
-  def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    # Storm AMS integration
-    if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
-      "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
-
-      validationItems.append({"config-name": 'metrics.reporter.register',
-                              "item": self.getWarnItem(
-                                "Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.")})
-
-    return self.toConfigurationValidationProblems(validationItems, "storm-site")
-
-  def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    ams_env = getSiteProperties(configurations, "ams-env")
-    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
-    validationItems = []
-    mb = 1024 * 1024
-    gb = 1024 * mb
-
-    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
-    if regionServerItem:
-      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
-
-    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
-    if hbaseMasterHeapsizeItem:
-      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
-    if logDirItem:
-      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
-
-    collector_heapsize = to_number(ams_env.get("metrics_collector_heapsize"))
-    hbase_master_heapsize = to_number(properties["hbase_master_heapsize"])
-    hbase_master_xmn_size = to_number(properties["hbase_master_xmn_size"])
-    hbase_regionserver_heapsize = to_number(properties["hbase_regionserver_heapsize"])
-    hbase_regionserver_xmn_size = to_number(properties["regionserver_xmn_size"])
-
-    # Validate Xmn settings.
-    masterXmnItem = None
-    regionServerXmnItem = None
-    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
-
-    if is_hbase_distributed:
-      minMasterXmn = 0.12 * hbase_master_heapsize
-      maxMasterXmn = 0.2 * hbase_master_heapsize
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
-
-      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
-      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
-      if hbase_regionserver_xmn_size < minRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                               "(12% of hbase_regionserver_heapsize)"
-                                               .format(int(ceil(minRegionServerXmn))))
-
-      if hbase_regionserver_xmn_size > maxRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                               "(20% of hbase_regionserver_heapsize)"
-                                               .format(int(floor(maxRegionServerXmn))))
-    else:
-      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
-      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(floor(maxMasterXmn))))
-    if masterXmnItem:
-      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
-
-    if regionServerXmnItem:
-      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
-
-    if hbaseMasterHeapsizeItem is None:
-      hostMasterComponents = {}
-
-      for service in services["services"]:
-        for component in service["components"]:
-          if component["StackServiceComponents"]["hostnames"] is not None:
-            for hostName in component["StackServiceComponents"]["hostnames"]:
-              if self.isMasterComponent(component):
-                if hostName not in hostMasterComponents.keys():
-                  hostMasterComponents[hostName] = []
-                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
-
-      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-      for collectorHostName in amsCollectorHosts:
-        for host in hosts["items"]:
-          if host["Hosts"]["host_name"] == collectorHostName:
-            # AMS Collector co-hosted with other master components in bigger clusters
-            if len(hosts['items']) > 31 and \
-                            len(hostMasterComponents[collectorHostName]) > 2 and \
-                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
-              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
-                                  "It is recommended to use a separate host for the " \
-                                  "Ambari Metrics Collector component and ensure " \
-                                  "the host has sufficient memory available."
-
-              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
-                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
-              if hbaseMasterHeapsizeItem:
-                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-            # Check for unused RAM on AMS Collector node
-            hostComponents = []
-            for service in services["services"]:
-              for component in service["components"]:
-                if component["StackServiceComponents"]["hostnames"] is not None:
-                  if collectorHostName in component["StackServiceComponents"]["hostnames"]:
-                    hostComponents.append(component["StackServiceComponents"]["component_name"])
-
-            requiredMemory = getMemorySizeRequired(hostComponents, configurations)
-            unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
-            if unusedMemory > 4*gb:  # warn user, if more than 4GB RAM is unused
-              heapPropertyToIncrease = "hbase_regionserver_heapsize" if is_hbase_distributed else "hbase_master_heapsize"
-              xmnPropertyToIncrease = "regionserver_xmn_size" if is_hbase_distributed else "hbase_master_xmn_size"
-              recommended_collector_heapsize = int((unusedMemory - 4*gb)/5) + collector_heapsize*mb
-              recommended_hbase_heapsize = int((unusedMemory - 4*gb)*4/5) + to_number(properties.get(heapPropertyToIncrease))*mb
-              recommended_hbase_heapsize = min(32*gb, recommended_hbase_heapsize) #Make sure heapsize <= 32GB
-              recommended_xmn_size = round_to_n(0.12*recommended_hbase_heapsize/mb,128)
-
-              if collector_heapsize < recommended_collector_heapsize or \
-                  to_number(properties[heapPropertyToIncrease]) < recommended_hbase_heapsize:
-                collectorHeapsizeItem = self.getWarnItem("{0} MB RAM is unused on the host {1} based on components " \
-                                                         "assigned. Consider allocating  {2} MB to " \
-                                                         "metrics_collector_heapsize in ams-env, " \
-                                                         "{3} MB to {4} in ams-hbase-env"
-                                                         .format(unusedMemory/mb, collectorHostName,
-                                                                 recommended_collector_heapsize/mb,
-                                                                 recommended_hbase_heapsize/mb,
-                                                                 heapPropertyToIncrease))
-                validationItems.extend([{"config-name": heapPropertyToIncrease, "item": collectorHeapsizeItem}])
-
-              if to_number(properties[xmnPropertyToIncrease]) < recommended_hbase_heapsize:
-                xmnPropertyToIncreaseItem = self.getWarnItem("Consider allocating {0} MB to use up some unused memory "
-                                                             "on host".format(recommended_xmn_size))
-                validationItems.extend([{"config-name": xmnPropertyToIncrease, "item": xmnPropertyToIncreaseItem}])
-      pass
-
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
-
-
-  def getPreferredMountPoints(self, hostInfo):
-
-    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
-    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
-                              "/etc/hostname", "/tmp"]
-    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
-    mountPoints = []
-    if hostInfo and "disk_info" in hostInfo:
-      mountPointsDict = {}
-      for mountpoint in hostInfo["disk_info"]:
-        if not (mountpoint["mountpoint"] in undesirableMountPoints or
-                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
-                mountpoint["type"] in undesirableFsTypes or
-                mountpoint["available"] == str(0)):
-          mountPointsDict[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
-      if mountPointsDict:
-        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
-    mountPoints.append("/")
-    return mountPoints
-
-  def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    dir = properties[propertyName]
-    if not dir.startswith("file://") or dir == recommendedDefaults.get(propertyName):
-      return None
-
-    dir = re.sub("^file://", "", dir, count=1)
-    mountPoints = []
-    for mountPoint in hostInfo["disk_info"]:
-      mountPoints.append(mountPoint["mountpoint"])
-    mountPoint = getMountPointForDir(dir, mountPoints)
-
-    if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:
-      return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName))
-
-    return None
-
-  def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, requiredDiskSpace):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    dir = properties[propertyName]
-    if not dir.startswith("file://"):
-      return None
-
-    dir = re.sub("^file://", "", dir, count=1)
-    mountPoints = {}
-    for mountPoint in hostInfo["disk_info"]:
-      mountPoints[mountPoint["mountpoint"]] = to_number(mountPoint["available"])
-    mountPoint = getMountPointForDir(dir, mountPoints.keys())
-
-    if not mountPoints:
-      return self.getErrorItem("No disk info found on host %s" % hostInfo["host_name"])
-
-    if mountPoints[mountPoint] < requiredDiskSpace:
-      msg = "Ambari Metrics disk space requirements not met. \n" \
-            "Recommended disk space for partition {0} is {1}G"
-      return self.getWarnItem(msg.format(mountPoint, requiredDiskSpace/1048576)) # in Gb
-    return None
-
-  def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
-    if propertyName not in recommendedDefaults:
-      # If a property name exists in, say, hbase-env and hbase-site (which is allowed), then it will exist in the
-      # "properties" dictionary, but not necessarily in the "recommendedDefaults" dictionary. In this case, ignore it.
-      return None
-
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    value = to_number(properties[propertyName])
-    if value is None:
-      return self.getErrorItem("Value should be integer")
-    defaultValue = to_number(recommendedDefaults[propertyName])
-    if defaultValue is None:
-      return None
-    if value < defaultValue:
-      return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
-    return None
-
-  def validatorEqualsPropertyItem(self, properties1, propertyName1,
-                                  properties2, propertyName2,
-                                  emptyAllowed=False):
-    if not propertyName1 in properties1:
-      return self.getErrorItem("Value should be set for %s" % propertyName1)
-    if not propertyName2 in properties2:
-      return self.getErrorItem("Value should be set for %s" % propertyName2)
-    value1 = properties1.get(propertyName1)
-    if value1 is None and not emptyAllowed:
-      return self.getErrorItem("Empty value for %s" % propertyName1)
-    value2 = properties2.get(propertyName2)
-    if value2 is None and not emptyAllowed:
-      return self.getErrorItem("Empty value for %s" % propertyName2)
-    if value1 != value2:
-      return self.getWarnItem("It is recommended to set equal values "
-             "for properties {0} and {1}".format(propertyName1, propertyName2))
-
-    return None
-
-  def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults,
-                                       propertyName):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set for %s" % propertyName)
-    value = properties.get(propertyName)
-    if not propertyName in recommendedDefaults:
-      return self.getErrorItem("Value should be recommended for %s" % propertyName)
-    recommendedValue = recommendedDefaults.get(propertyName)
-    if value != recommendedValue:
-      return self.getWarnItem("It is recommended to set value {0} "
-             "for property {1}".format(recommendedValue, propertyName))
-    return None
-
-  def validateMinMemorySetting(self, properties, defaultValue, propertyName):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    if defaultValue is None:
-      return self.getErrorItem("Config's default value can't be null or undefined")
-
-    value = properties[propertyName]
-    if value is None:
-      return self.getErrorItem("Value can't be null or undefined")
-    try:
-      valueInt = to_number(value)
-      # TODO: generify for other use cases
-      defaultValueInt = int(str(defaultValue).strip())
-      if valueInt < defaultValueInt:
-        return self.getWarnItem("Value is less than the minimum recommended default of -Xmx" + str(defaultValue))
-    except:
-      return None
-
-    return None
-
-  def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):
-    if propertyName not in properties:
-      return self.getErrorItem("Value should be set")
-
-    capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
-    leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)
-    queue_name = properties[propertyName]
-
-    if len(leaf_queue_names) == 0:
-      return None
-    elif queue_name not in leaf_queue_names:
-      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
-
-    return None
-
-  def recommendYarnQueue(self, services, catalog_name=None, queue_property=None):
-    old_queue_name = None
-
-    if services and 'configurations' in services:
-        configurations = services["configurations"]
-        if catalog_name in configurations and queue_property in configurations[catalog_name]["properties"]:
-          old_queue_name = configurations[catalog_name]["properties"][queue_property]
-
-        capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
-        leaf_queues = sorted(self.getAllYarnLeafQueues(capacity_scheduler_properties))
-
-        if leaf_queues and (old_queue_name is None or old_queue_name not in leaf_queues):
-          return leaf_queues.pop()
-        elif old_queue_name and old_queue_name in leaf_queues:
-          return None
-
-    return "default"
-
-  def validateXmxValue(self, properties, recommendedDefaults, propertyName):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    value = properties[propertyName]
-    defaultValue = recommendedDefaults[propertyName]
-    if defaultValue is None:
-      return self.getErrorItem("Config's default value can't be null or undefined")
-    if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):
-      # Xmx is in the default-value but not the value, should be an error
-      return self.getErrorItem('Invalid value format')
-    if not checkXmxValueFormat(defaultValue):
-      # if default value does not contain Xmx, then there is no point in validating existing value
-      return None
-    valueInt = formatXmxSizeToBytes(getXmxSize(value))
-    defaultValueXmx = getXmxSize(defaultValue)
-    defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
-    if valueInt < defaultValueInt:
-      return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
-    return None
-
-  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
-                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
-                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
-                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
-                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
-                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
-                        {"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
-    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
-
-  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    clusterEnv = getSiteProperties(configurations, "cluster-env")
-    validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
-                        {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
-                        {"config-name": 'yarn.nodemanager.linux-container-executor.group', "item": self.validatorEqualsPropertyItem(properties, "yarn.nodemanager.linux-container-executor.group", clusterEnv, "user_group")},
-                        {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
-    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
-
-  def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [{"config-name": 'service_check.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]
-    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
-
-  def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    hbase_site = getSiteProperties(configurations, "hbase-site")
-    validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
-                        {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
-                        {"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
-    return self.toConfigurationValidationProblems(validationItems, "hbase-env")
-
-  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    clusterEnv = getSiteProperties(configurations, "cluster-env")
-    validationItems = [{"config-name": 'dfs.datanode.du.reserved', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},
-                       {"config-name": 'dfs.datanode.data.dir', "item": self.validatorOneDataDirPerPartition(properties, 'dfs.datanode.data.dir', services, hosts, clusterEnv)}]
-    return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
-
-  def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
-                        {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
-                        {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
-    return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
-
-  def validatorOneDataDirPerPartition(self, properties, propertyName, services, hosts, clusterEnv):
-    if not propertyName in properties:
-      return self.getErrorItem("Value should be set")
-    dirs = properties[propertyName]
-
-    if not (clusterEnv and "one_dir_per_partition" in clusterEnv and clusterEnv["one_dir_per_partition"].lower() == "true"):
-      return None
-
-    dataNodeHosts = self.getDataNodeHosts(services, hosts)
-
-    warnings = set()
-    for host in dataNodeHosts:
-      hostName = host["Hosts"]["host_name"]
-
-      mountPoints = []
-      for diskInfo in host["Hosts"]["disk_info"]:
-        mountPoints.append(diskInfo["mountpoint"])
-
-      if get_mounts_with_multiple_data_dirs(mountPoints, dirs):
-        # A detailed message can be too long on large clusters:
-        # warnings.append("Host: " + hostName + "; Mount: " + mountPoint + "; Data directories: " + ", ".join(dirList))
-        warnings.add(hostName)
-        break
-
-    if len(warnings) > 0:
-      return self.getWarnItem("cluster-env/one_dir_per_partition is

<TRUNCATED>

[11/52] bigtop git commit: ODPI-5. Integrate Ambari packaging into Bigtop

Posted by rv...@apache.org.
ODPI-5. Integrate Ambari packaging into Bigtop

(cherry picked from commit 60fa93488643cd999a0c48dc53b06b41528c14f5)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/b1d707c2
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/b1d707c2
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/b1d707c2

Branch: refs/heads/master
Commit: b1d707c22a686bf103d23fa1bca1898a2238e779
Parents: 0aeea97
Author: Sergey Soldatov <se...@gmail.com>
Authored: Fri Jan 8 11:10:17 2016 -0800
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:07 2017 -0700

----------------------------------------------------------------------
 .../ODPi/1.0/blueprints/multinode-default.json  |  108 +
 .../ODPi/1.0/blueprints/singlenode-default.json |   65 +
 .../ODPi/1.0/configuration/cluster-env.xml      |  232 +++
 .../src/common/ambari/ODPi/1.0/hooks/.hash      |    1 +
 .../1.0/hooks/after-INSTALL/scripts/hook.py     |   37 +
 .../1.0/hooks/after-INSTALL/scripts/params.py   |  101 +
 .../scripts/shared_initialization.py            |  108 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |   53 +
 .../ODPi/1.0/hooks/before-ANY/scripts/hook.py   |   36 +
 .../ODPi/1.0/hooks/before-ANY/scripts/params.py |  230 +++
 .../before-ANY/scripts/shared_initialization.py |  224 ++
 .../1.0/hooks/before-INSTALL/scripts/hook.py    |   37 +
 .../1.0/hooks/before-INSTALL/scripts/params.py  |  113 +
 .../scripts/repo_initialization.py              |   68 +
 .../scripts/shared_initialization.py            |   37 +
 .../1.0/hooks/before-RESTART/scripts/hook.py    |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 0 -> 19285850 bytes
 .../before-START/files/task-log4j.properties    |  134 ++
 .../hooks/before-START/files/topology_script.py |   66 +
 .../ODPi/1.0/hooks/before-START/scripts/hook.py |   39 +
 .../1.0/hooks/before-START/scripts/params.py    |  318 +++
 .../before-START/scripts/rack_awareness.py      |   47 +
 .../scripts/shared_initialization.py            |  175 ++
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |  104 +
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../src/common/ambari/ODPi/1.0/metainfo.xml     |   22 +
 .../ODPi/1.0/properties/stack_features.json     |  308 +++
 .../ambari/ODPi/1.0/properties/stack_tools.json |    4 +
 .../common/ambari/ODPi/1.0/repos/repoinfo.xml   |   33 +
 .../ambari/ODPi/1.0/role_command_order.json     |   41 +
 .../ambari/ODPi/1.0/services/HDFS/metainfo.xml  |   27 +
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |   33 +
 .../ODPi/1.0/services/ZOOKEEPER/metainfo.xml    |   27 +
 .../ambari/ODPi/1.0/services/stack_advisor.py   | 1947 ++++++++++++++++++
 .../src/common/ambari/ambari-server.svc         |    0
 .../src/common/ambari/ambari.defaults           |    0
 .../src/common/ambari/do-component-build        |   21 +
 .../src/common/ambari/install_ambari.sh         |  157 ++
 .../src/deb/ambari/ambari-agent.install         |    8 +
 .../src/deb/ambari/ambari-agent.postinst        |   33 +
 .../src/deb/ambari/ambari-agent.postrm          |   24 +
 .../src/deb/ambari/ambari-agent.posttrm         |   15 +
 .../src/deb/ambari/ambari-agent.preinst         |   55 +
 .../src/deb/ambari/ambari-agent.prerm           |   35 +
 .../src/deb/ambari/ambari-client.install        |    2 +
 .../src/deb/ambari/ambari-server.install        |   10 +
 .../src/deb/ambari/ambari-server.postinst       |   27 +
 .../src/deb/ambari/ambari-server.postrm         |   15 +
 .../src/deb/ambari/ambari-server.posttrm        |   15 +
 .../src/deb/ambari/ambari-server.preinst        |   94 +
 .../src/deb/ambari/ambari-server.prerm          |   27 +
 bigtop-packages/src/deb/ambari/changelog        |    1 +
 bigtop-packages/src/deb/ambari/compat           |    1 +
 bigtop-packages/src/deb/ambari/control          |   37 +
 bigtop-packages/src/deb/ambari/copyright        |   15 +
 bigtop-packages/src/deb/ambari/rules            |   42 +
 bigtop-packages/src/deb/ambari/source/format    |    1 +
 .../src/deb/ambari/source/include-binaries      |    1 +
 bigtop-packages/src/rpm/ambari/RPMS/.gitignore  |    1 +
 bigtop-packages/src/rpm/ambari/SPECS/.gitignore |    3 +
 .../src/rpm/ambari/SPECS/ambari.spec            |  504 +++++
 66 files changed, 6203 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
new file mode 100755
index 0000000..53248e4
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
@@ -0,0 +1,108 @@
+{
+    "configurations" : [
+    ],
+    "host_groups" : [
+        {
+            "name" : "master_1",
+            "components" : [
+                {
+                    "name" : "NAMENODE"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_2",
+            "components" : [
+
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "HISTORYSERVER"
+                },
+                {
+                    "name" : "SECONDARY_NAMENODE"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "POSTGRESQL_SERVER"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_3",
+            "components" : [
+                {
+                    "name" : "RESOURCEMANAGER"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_4",
+            "components" : [
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "slave",
+            "components" : [
+                {
+                    "name" : "NODEMANAGER"
+                },
+                {
+                    "name" : "DATANODE"
+                }
+            ],
+            "cardinality" : "${slavesCount}"
+        },
+        {
+            "name" : "gateway",
+            "components" : [
+                {
+                    "name" : "AMBARI_SERVER"
+                },
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "MAPREDUCE2_CLIENT"
+                }
+            ],
+            "cardinality" : "1"
+        }
+    ],
+    "Blueprints" : {
+        "blueprint_name" : "blueprint-multinode-default",
+        "stack_name" : "ODPi",
+        "stack_version" : "1.0"
+    }
+}
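
The blueprint above only declares host groups and their components; nothing in this commit registers it anywhere. As a rough sketch of how such a file is typically used, the Ambari REST API accepts the blueprint via POST /api/v1/blueprints/<name> and then instantiates a cluster from it with a host-group-to-host mapping. Everything below is illustrative: the server URL, credentials, cluster and host names are placeholders, requests is an assumed third-party dependency, and the ${slavesCount} placeholder in the committed file would have to be substituted with a real count before registration.

# Sketch: register the multinode blueprint and create a cluster from it.
import json
import requests

AMBARI = "http://localhost:8080/api/v1"        # placeholder server
AUTH = ("admin", "admin")                      # placeholder credentials
HEADERS = {"X-Requested-By": "ambari"}         # required by the Ambari API

with open("multinode-default.json") as f:
    blueprint = json.load(f)                   # assumes ${slavesCount} was already resolved

# 1. Register the blueprint under a chosen name.
requests.post(AMBARI + "/blueprints/odpi-multinode",
              auth=AUTH, headers=HEADERS, data=json.dumps(blueprint))

# 2. Map every host group to concrete hosts and create the cluster.
cluster_template = {
    "blueprint": "odpi-multinode",
    "default_password": "changeme",
    "host_groups": [
        {"name": "master_1", "hosts": [{"fqdn": "master1.example.com"}]},
        {"name": "master_2", "hosts": [{"fqdn": "master2.example.com"}]},
        {"name": "master_3", "hosts": [{"fqdn": "master3.example.com"}]},
        {"name": "master_4", "hosts": [{"fqdn": "master4.example.com"}]},
        {"name": "slave",    "hosts": [{"fqdn": "slave1.example.com"},
                                       {"fqdn": "slave2.example.com"}]},
        {"name": "gateway",  "hosts": [{"fqdn": "gateway.example.com"}]},
    ],
}
requests.post(AMBARI + "/clusters/odpi", auth=AUTH, headers=HEADERS,
              data=json.dumps(cluster_template))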

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
new file mode 100755
index 0000000..6aeb516
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
@@ -0,0 +1,65 @@
+{
+    "configurations" : [
+    ],
+    "host_groups" : [
+        {
+            "name" : "host_group_1",
+            "components" : [
+                {
+                    "name" : "HISTORYSERVER"
+                },
+                {
+                    "name" : "NAMENODE"
+                },
+                {
+                    "name" : "SUPERVISOR"
+                },
+                {
+                    "name" : "AMBARI_SERVER"
+                },
+                {
+                    "name" : "APP_TIMELINE_SERVER"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "NODEMANAGER"
+                },
+                {
+                    "name" : "DATANODE"
+                },
+                {
+                    "name" : "RESOURCEMANAGER"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "SECONDARY_NAMENODE"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "MAPREDUCE2_CLIENT"
+                },
+                {
+                    "name" : "POSTGRESQL_SERVER"
+                },
+                {
+                    "name" : "DRPC_SERVER"
+                }
+            ],
+            "cardinality" : "1"
+        }
+    ],
+    "Blueprints" : {
+        "blueprint_name" : "blueprint-singlenode-default",
+        "stack_name" : "ODPi",
+        "stack_version" : "1.0"
+    }
+}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
new file mode 100755
index 0000000..81cb175
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>recovery_enabled</name>
+    <value>true</value>
+    <description>Auto start enabled or not for this cluster.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_type</name>
+    <value>AUTO_START</value>
+    <description>Auto start type.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_lifetime_max_count</name>
+    <value>1024</value>
+    <description>Auto start lifetime maximum count of recovery attempt allowed per host component. This is reset when agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_max_count</name>
+    <value>6</value>
+    <description>Auto start maximum count of recovery attempt allowed per host component in a window. This is reset when agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_window_in_minutes</name>
+    <value>60</value>
+    <description>Auto start recovery window size in minutes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_retry_interval</name>
+    <value>5</value>
+    <description>Auto start recovery retry gap between tries per host component.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <display-name>Skip group modifications during install</display-name>
+    <value>false</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Whether to ignore failures on users and group creation</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <display-name>Smoke User</display-name>
+    <value>ambari-qa</value>
+    <property-type>USER</property-type>
+    <description>User executing service checks</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser_keytab</name>
+    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+    <description>Path to smoke test user keytab file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>user_group</name>
+    <display-name>Hadoop Group</display-name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_suse_rhel_template</name>
+    <value>[{{repo_id}}]
+name={{repo_id}}
+{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
+
+path=/
+enabled=1
+gpgcheck=0</value>
+    <description>Template of repositories for rhel and suse.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_ubuntu_template</name>
+    <value>{{package_type}} {{base_url}} {{components}}</value>
+    <description>Template of repositories for ubuntu.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>override_uid</name>
+    <value>true</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <display-name>Have Ambari manage UIDs</display-name>
+    <description>Have Ambari manage UIDs</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>fetch_nonlocal_groups</name>
+    <value>true</value>
+    <display-name>Ambari fetch nonlocal groups</display-name>
+    <description>Ambari requires fetching all the groups. This can be slow
+        in environments with LDAP enabled. Setting this option to false allows Ambari
+        to skip user/group management for LDAP-backed groups.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>managed_hdfs_resource_property_names</name>
+    <value/>
+    <description>Comma-separated list of property names with HDFS resource paths.
+        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_tools</name>
+    <value/>
+    <description>Stack specific tools</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_tools.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_features</name>
+    <value/>
+    <description>List of features supported by the stack</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_features.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>stack_root</name>
+    <value>/usr/hdp</value>
+    <description>Stack root folder</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>alerts_repeat_tolerance</name>
+    <value>1</value>
+    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_bad_mounts</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs, this will prevent Ambari from creating any directories.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_dirs_on_root</name>
+    <value>true</value>
+    <description>For properties handled by handle_mounted_dirs, this will make Ambari manage (create and set permissions on) unknown directories on the / partition.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>one_dir_per_partition</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs, this will make Ambari assume at most one directory per disk partition and warn otherwise.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
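
These cluster-env values are not read from the XML directly at run time; Ambari merges them into the command JSON that every stack script receives, and the params modules later in this commit pull them out of that structure (for example security_enabled and user_group in the hook scripts below). A minimal sketch of that pattern, with the ignore_bad_mounts lookup added purely for illustration:

# Sketch of how a params.py-style module consumes cluster-env properties.
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default

config = Script.get_config()

# Plain lookups for properties that are always present in cluster-env.
security_enabled = config['configurations']['cluster-env']['security_enabled']
user_group = config['configurations']['cluster-env']['user_group']

# default() tolerates a missing key, which is handy for newer properties
# such as ignore_bad_mounts that older cluster definitions may not carry.
ignore_bad_mounts = default("/configurations/cluster-env/ignore_bad_mounts", False)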

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
new file mode 100755
index 0000000..f8c8c1f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
@@ -0,0 +1 @@
+18a52d08dc963523592f7f1f2997089b6655de71
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..8a583b3
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_stack_symlinks
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_stack_symlinks()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
new file mode 100755
index 0000000..819d8f7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,101 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# default hadoop params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# HDP 2.2+ params
+if Script.is_stack_greater_or_equal("2.2"):
+  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+  # not supported in HDP 2.2+
+  hadoop_conf_empty_dir = None
+
+versioned_stack_root = '/usr/hdp/current'
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
+stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
+
+upgrade_suspended = default("/roleParams/upgrade_suspended", False)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..9982dc6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,108 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+
+def setup_stack_symlinks():
+  """
+  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
+  stack version, such as "2.3". This should always be called after a component has been
+  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
+  interact with this since it's done via a custom command and will not trigger this hook.
+  :return:
+  """
+  import params
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    # try using the exact version first, falling back to just the stack if it's not defined,
+    # which would only be the case during an initial cluster installation
+    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
+
+    if not params.upgrade_suspended:
+      # On parallel command execution this should be executed by a single process at a time.
+      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+        stack_select.select_all(version)
+
+def setup_config():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+
+  is_hadoop_conf_dir_present = False
+  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
+    is_hadoop_conf_dir_present = True
+  else:
+    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
+
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load version from file.  Made a separate method for testing
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of HDP-2.3 and higher
+  """
+  import params
+
+  if not Script.is_stack_greater_or_equal("2.3"):
+    Logger.info("Can only link configs for HDP-2.3 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for k, v in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
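
link_configs() above only does anything when load_version() can pull a stack version out of the structured-output file that Ambari writes for the command (self.stroutfile in the hook). A small, self-contained illustration of that contract, written as a simplified re-implementation with a made-up file name and version string:

# Illustration of the JSON contract load_version() relies on:
# a file whose top-level object carries a "version" key.
import json
import tempfile


def load_version(struct_out_file):
    """Simplified variant of the hook's helper: return the version, or None on any problem."""
    try:
        with open(struct_out_file, 'r') as fp:
            return json.load(fp).get('version')
    except (IOError, ValueError):
        return None


with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fp:
    json.dump({"version": "2.3.4.0-3485"}, fp)   # hypothetical build id

print(load_version(fp.name))                     # -> 2.3.4.0-3485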

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100755
index 0000000..08542c4
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
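+# $1 (username): user whose uid should be moved into the 1001-2000 range
+# $2 (directories): comma-separated list of directories to chown to the new uid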
+
+function find_available_uid() {
+ newUid=0
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ $newUid -eq 0 ]
+then
+  echo "Failed to find Uid between 1000 and 2000"
+  exit 1
+fi
+
+set -e
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
new file mode 100755
index 0000000..c34be0b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+
+    setup_users()
+    if params.has_namenode or params.dfs_type == 'HCFS':
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
new file mode 100755
index 0000000..5544085
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,230 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+restart_type = default("/commandParams/restart_type", "")
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
+  stack_version_formatted = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts the port from an address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
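+  # group(1) captures the host part and group(2) the port; an optional http:// or https:// prefix is allowed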
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is in the privileged (root-only, below 1024) range on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_stack_root = '/usr/hdp/current'
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+# HDP 2.2+ params
+if Script.is_stack_greater_or_equal("2.2"):
+  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+  # not supported in HDP 2.2+
+  hadoop_conf_empty_dir = None
+
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = get_port(dfs_dn_addr)
+    dfs_dn_http_port = get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(dfs_dn_https_addr)
+    # Avoid a situation where the datanode cannot start as a non-root user because it binds to privileged (root-owned) ports
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
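+# users not explicitly listed below fall back to the default cluster user_group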
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100755
index 0000000..1a7d21a
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,224 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+import tempfile
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+  should_create_users_and_groups = not params.host_sys_prepped and not params.ignore_groupsusers_create
+
+  if should_create_users_and_groups:
+    for group in params.group_list:
+      Group(group,
+      )
+
+    for user in params.user_list:
+      User(user,
+          gid = params.user_to_gid_dict[user],
+          groups = params.user_to_groups_dict[user],
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+      )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if not params.host_sys_prepped and params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is not enabled')
+      pass
+
+  if not params.host_sys_prepped:
+    if params.has_namenode:
+      if should_create_users_and_groups:
+        create_dfs_cluster_admins()
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
+      if should_create_users_and_groups:
+        create_tez_am_view_acls()
+  else:
+    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
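+  Example (hypothetical value): "hdfs,admin hadoop" would add users hdfs and admin and the group hadoop.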
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+  )
+
+def create_tez_am_view_acls():
+
+  """
+  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
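+  A value starting with "*" is treated as open access, so no users or groups are created for it (see the check below).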
+  """
+  import params
+
+  if not params.tez_am_view_acls.startswith("*"):
+    create_users_and_groups(params.tez_am_view_acls)
+
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split('\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma-separated list of directories owned by the user
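+  Stages changeToSecureUid.sh into {tmp_dir} and runs it; skipped when the user's uid is already
+  above 1000 or when ignore_groupsusers_create is set.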
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+    
+def setup_hadoop_env():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("2.2"):
+      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
+        group=params.user_group )
+
+      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}"))
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=01777
+    )
+
+def setup_java():
+  """
+  Installs the JDK using parameters that come from ambari-server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+    if not params.jdk_name: # if custom jdk is used.
+      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
+    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
+
+    try:
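+      # only .bin self-extracting installers and .gz tarballs are handled below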
+      if params.jdk_name.endswith(".bin"):
+        chmod_cmd = ("chmod", "+x", jdk_curl_target)
+        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      elif params.jdk_name.endswith(".gz"):
+        chmod_cmd = ("chmod","a+x", java_dir)
+        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+      Directory(java_dir
+      )
+
+      Execute(chmod_cmd,
+              sudo = True,
+              )
+
+      Execute(install_cmd,
+              )
+
+    finally:
+      Directory(tmp_java_dir, action="delete")
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+    Execute(('chmod', '-R', '755', params.java_home),
+      sudo = True,
+    )

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ce17776
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
new file mode 100755
index 0000000..6193c11
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.expect import expect
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = not len(namenode_host) == 0
+has_hs = not len(hs_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+has_falcon_server = not len(falcon_host) == 0
+has_tez = 'tez-site' in config['configurations']
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100755
index 0000000..a35dce7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.resources.repository import Repository
+from resource_management.core.logger import Logger
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
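+  @param repo_template: OS-family repo file template (repo_suse_rhel_template or repo_ubuntu_template from cluster-env)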
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  if 0 == len(repo_dicts):
+    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
+  else:
+    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
+
+  for repo in repo_dicts:
+    if not 'baseUrl' in repo:
+      repo['baseUrl'] = None
+    if not 'mirrorsList' in repo:
+      repo['mirrorsList'] = None
+    
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+    
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..1609050
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import stack_tools
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.packaging import Package
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
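+  # the stack selector tool (e.g. hdp-select on HDP stacks) is only required for stack versions >= 2.2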
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
+    packages.append(stack_selector_package)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
new file mode 100755
index 0000000..14b9d99
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
new file mode 100755
index 0000000..68aa96d
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
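+
+# Arguments: <hdfs_user> <conf_dir> <bin_dir> <mark_dir> <name_dir> [<name_dir> ...]
+# The NameNode is formatted only when every name dir is empty and the mark dir does not yet exist.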
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100755
index 0000000..c90890b
Binary files /dev/null and b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
new file mode 100755
index 0000000..7e12962
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
new file mode 100755
index 0000000..0f7a55c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
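+
+# topology_mappings.data is a ConfigParser-style file mapping both hostnames and IPs to racks, e.g.:
+#   [network_topology]
+#   host1.example.com=/rack01
+#   192.168.1.10=/rack01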
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
new file mode 100755
index 0000000..f21e4b1
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()


[24/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

(cherry picked from commit d03f5b8b9796da11b2a0ade72704f7f3739af9ec)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/ba8d7f50
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/ba8d7f50
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/ba8d7f50

Branch: refs/heads/master
Commit: ba8d7f50d581c74c17b0799a813ef753d50c767d
Parents: 490bcb6
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Sat Oct 22 20:33:46 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:08 2017 -0700

----------------------------------------------------------------------
 .../ambari/ODPi/1.0/services/HIVE/alerts.json   |  232 ++
 .../HIVE/configuration/beeline-log4j2.xml       |   62 +
 .../services/HIVE/configuration/hcat-env.xml    |   41 +
 .../hive-atlas-application.properties.xml       |   67 +
 .../services/HIVE/configuration/hive-env.xml    |  540 ++++
 .../HIVE/configuration/hive-exec-log4j.xml      |   96 +
 .../HIVE/configuration/hive-exec-log4j2.xml     |   83 +
 .../HIVE/configuration/hive-interactive-env.xml |  373 +++
 .../configuration/hive-interactive-site.xml     |  909 ++++++
 .../services/HIVE/configuration/hive-log4j.xml  |  106 +
 .../services/HIVE/configuration/hive-log4j2.xml |   90 +
 .../services/HIVE/configuration/hive-site.xml   | 2796 ++++++++++++++++++
 .../HIVE/configuration/hivemetastore-site.xml   |   43 +
 .../hiveserver2-interactive-site.xml            |   56 +
 .../HIVE/configuration/hiveserver2-site.xml     |  122 +
 .../HIVE/configuration/llap-cli-log4j2.xml      |   91 +
 .../HIVE/configuration/llap-daemon-log4j.xml    |  158 +
 .../HIVE/configuration/ranger-hive-audit.xml    |  136 +
 .../ranger-hive-plugin-properties.xml           |   63 +
 .../configuration/ranger-hive-policymgr-ssl.xml |   71 +
 .../HIVE/configuration/ranger-hive-security.xml |   81 +
 .../HIVE/configuration/tez-interactive-site.xml |  144 +
 .../services/HIVE/configuration/webhcat-env.xml |   38 +
 .../HIVE/configuration/webhcat-log4j.xml        |   63 +
 .../HIVE/configuration/webhcat-site.xml         |  287 ++
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 +++++
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 +++++
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 +++++++++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++++++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++++++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 ++++++++++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 ++
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../ambari/ODPi/1.0/services/HIVE/kerberos.json |  149 +
 .../ambari/ODPi/1.0/services/HIVE/metainfo.xml  |  518 ++++
 .../alert_hive_interactive_thrift_port.py       |  216 ++
 .../HIVE/package/alerts/alert_hive_metastore.py |  270 ++
 .../package/alerts/alert_hive_thrift_port.py    |  274 ++
 .../package/alerts/alert_llap_app_status.py     |  299 ++
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 ++
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 +++++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 +++++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 +++++++++
 .../services/HIVE/package/files/addMysqlUser.sh |   39 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   41 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 +
 .../HIVE/package/files/hiveTezSetup.cmd         |   58 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 +
 .../1.0/services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh       |   33 +
 .../HIVE/package/files/startMetastore.sh        |   25 +
 .../HIVE/package/files/templetonSmoke.sh        |   95 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../1.0/services/HIVE/package/scripts/hcat.py   |   81 +
 .../HIVE/package/scripts/hcat_client.py         |   85 +
 .../HIVE/package/scripts/hcat_service_check.py  |   86 +
 .../1.0/services/HIVE/package/scripts/hive.py   |  481 +++
 .../HIVE/package/scripts/hive_client.py         |   68 +
 .../HIVE/package/scripts/hive_interactive.py    |  302 ++
 .../HIVE/package/scripts/hive_metastore.py      |  259 ++
 .../HIVE/package/scripts/hive_server.py         |  211 ++
 .../package/scripts/hive_server_interactive.py  |  535 ++++
 .../HIVE/package/scripts/hive_server_upgrade.py |  141 +
 .../HIVE/package/scripts/hive_service.py        |  187 ++
 .../package/scripts/hive_service_interactive.py |  109 +
 .../HIVE/package/scripts/mysql_server.py        |   64 +
 .../HIVE/package/scripts/mysql_service.py       |   49 +
 .../HIVE/package/scripts/mysql_users.py         |   70 +
 .../HIVE/package/scripts/mysql_utils.py         |   35 +
 .../1.0/services/HIVE/package/scripts/params.py |   29 +
 .../HIVE/package/scripts/params_linux.py        |  736 +++++
 .../HIVE/package/scripts/params_windows.py      |   74 +
 .../HIVE/package/scripts/service_check.py       |  190 ++
 .../HIVE/package/scripts/setup_ranger_hive.py   |   98 +
 .../scripts/setup_ranger_hive_interactive.py    |   78 +
 .../HIVE/package/scripts/status_params.py       |  118 +
 .../services/HIVE/package/scripts/webhcat.py    |  145 +
 .../HIVE/package/scripts/webhcat_server.py      |  164 +
 .../HIVE/package/scripts/webhcat_service.py     |   96 +
 .../package/scripts/webhcat_service_check.py    |  128 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   54 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   54 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   52 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   52 +
 .../HIVE/package/templates/hive.conf.j2         |   35 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../templates/startHiveserver2Interactive.sh.j2 |   24 +
 .../package/templates/templeton_smoke.pig.j2    |   24 +
 89 files changed, 23314 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
new file mode 100755
index 0000000..e2431c3
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
@@ -0,0 +1,232 @@
+{
+  "HIVE": {
+    "service": [],
+    "HIVE_METASTORE": [
+      {
+        "name": "hive_metastore_process",
+        "label": "Hive Metastore Process",
+        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
+        "interval": 3,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      }
+    ],
+    "HIVE_SERVER": [
+      {
+        "name": "hive_server_process",
+        "label": "HiveServer2 Process",
+        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
+        "interval": 3,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      }
+    ],
+    "HIVE_SERVER_INTERACTIVE": [
+      {
+        "name": "hive_server_interactive_process",
+        "label": "HiveServer2 Interactive Process",
+        "description": "This host-level alert is triggered if the HiveServerInteractive cannot be determined to be up and responding to client requests.",
+        "interval": 3,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      },
+      {
+        "name": "llap_application",
+        "label": "LLAP Application",
+        "description": "This alert is triggered if the LLAP Application cannot be determined to be up and responding to requests.",
+        "interval": 3,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 120.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "default.hive.user",
+              "display_name": "Default HIVE User",
+              "value": "hive",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.hive.principal",
+              "display_name": "Default HIVE Principal",
+              "value": "hive/_HOST@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "default.hive.keytab",
+              "display_name": "Default HIVE Keytab",
+              "value": "/etc/security/keytabs/hive.llap.zk.sm.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env.",
+              "visibility": "HIDDEN"
+            }
+          ]
+        }
+      }
+    ],
+    "WEBHCAT_SERVER": [
+      {
+        "name": "hive_webhcat_server_status",
+        "label": "WebHCat Server Status",
+        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py",
+          "parameters": [
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
+              "visibility": "HIDDEN"
+            },
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
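
The alert definitions above are SCRIPT-type: the Ambari agent imports the referenced module on each interval and passes it the resolved configuration tokens plus the "parameters" values from alerts.json. As a rough, hedged sketch of that contract (the probe logic and names below are illustrative, not the actual alert_hive_metastore.py):

    OK_RESULT_CODE = 'OK'
    CRITICAL_RESULT_CODE = 'CRITICAL'

    # Keys mirroring the "parameters" blocks above
    CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
    CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
    SMOKEUSER_DEFAULT_KEY = 'default.smoke.user'

    # Cluster configuration token the agent resolves for the script
    SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'


    def get_tokens():
      """Tuple of configuration keys the agent should resolve and pass to execute()."""
      return (SMOKEUSER_KEY,)


    def execute(configurations={}, parameters={}, host_name=None):
      """Runs the health probe and returns (result_code, [label])."""
      timeout = float(parameters.get(CHECK_COMMAND_TIMEOUT_KEY,
                                     CHECK_COMMAND_TIMEOUT_DEFAULT))
      smokeuser = configurations.get(SMOKEUSER_KEY,
                                     parameters.get(SMOKEUSER_DEFAULT_KEY, 'ambari-qa'))

      # ... run the actual metastore/HiveServer2/WebHCat probe here, bounded by `timeout` ...
      healthy = True  # placeholder result for this sketch

      if healthy:
        return (OK_RESULT_CODE, ['Check run as {0} succeeded'.format(smokeuser)])
      return (CRITICAL_RESULT_CODE, ['Check run as {0} failed'.format(smokeuser)])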

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
new file mode 100755
index 0000000..03de64e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/beeline-log4j2.xml
@@ -0,0 +1,62 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = BeelineLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = WARN
+property.hive.root.logger = console
+
+# list of all appenders
+appenders = console
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# list of all loggers
+loggers = HiveConnection
+
+# HiveConnection logs useful info for dynamic service discovery
+logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
+logger.HiveConnection.level = INFO
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+  </value>
+    <description>Custom beeline-log4j2.properties</description>
+    <display-name>beeline-log4j template</display-name>
+    <filename>beeline-log4j2.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
new file mode 100755
index 0000000..3908d61
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
@@ -0,0 +1,41 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+    <description>This is the jinja template for hcat-env.sh file</description>
+    <display-name>hcat-env template</display-name>
+    <filename>hcat-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
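
Templates like the hcat-env "content" block above are Jinja snippets that the package scripts render against the stack's params module and write out as hcat-env.sh. A hedged sketch of that pattern follows; the parameter names (hcat_conf_dir, hcat_user, user_group) are assumptions used only for illustration:

    from resource_management.core.resources.system import File
    from resource_management.core.source import InlineTemplate
    from resource_management.libraries.functions.format import format


    def write_hcat_env():
      # `params` is the stack's params module loaded by the agent; the template's
      # {{java64_home}}, {{hcat_pid_dir}}, etc. resolve from attributes defined there.
      import params

      hcat_env_template = params.config['configurations']['hcat-env']['content']
      File(format("{hcat_conf_dir}/hcat-env.sh"),
           owner=params.hcat_user,
           group=params.user_group,
           content=InlineTemplate(hcat_env_template))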

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
new file mode 100755
index 0000000..7eb72ef
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-atlas-application.properties.xml
@@ -0,0 +1,67 @@
+<configuration><property require-input="false">
+    <name>atlas.hook.hive.synchronous</name>
+    <value>false</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.numRetries</name>
+    <value>3</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.minThreads</name>
+    <value>5</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.maxThreads</name>
+    <value>5</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.keepAliveTime</name>
+    <value>10</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>atlas.hook.hive.queueSize</name>
+    <value>1000</value>
+    <description></description>
+    <filename>hive-atlas-application.properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
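
These atlas.hook.hive.* settings end up as plain Java properties consumed by the Hive Atlas hook. A hedged sketch of how such a config type could be flattened onto disk (the target path and parameter names are assumptions):

    from resource_management.libraries.resources.properties_file import PropertiesFile


    def write_atlas_hook_properties(params):
      # The config type is already a flat dict of key -> value.
      atlas_props = params.config['configurations']['hive-atlas-application.properties']
      PropertiesFile('/etc/hive/conf/atlas-application.properties',  # assumed location
                     properties=atlas_props,
                     owner=params.hive_user,
                     group=params.user_group)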

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
new file mode 100755
index 0000000..e5ed319
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,540 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+      export HADOOP_USER_CLASSPATH_FIRST=true  # this prevents old metrics libs in the mapreduce lib dir from bringing in old jar deps that override HIVE_LIB
+      if [ "$SERVICE" = "cli" ]; then
+      if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+      else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+      fi
+      fi
+
+      # The heap size of the JVM started by the hive shell script can be controlled via:
+
+      if [ "$SERVICE" = "metastore" ]; then
+      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+      else
+      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+      fi
+
+      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+
+      # A larger heap size may be required when running queries over a large number of files or partitions.
+      # By default hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+      # appropriate for hive server (hwi etc).
+
+
+      # Set HADOOP_HOME to point to a specific hadoop install directory
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+
+      # Hive Configuration Directory can be controlled by:
+      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+
+      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
+      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+      if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+      fi
+      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+      fi
+
+      export METASTORE_PORT={{hive_metastore_port}}
+
+      {% if sqla_db_used or lib_dir_available %}
+      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+      {% endif %}
+        </value>
+    <description>This is the jinja template for hive-env.sh file</description>
+    <display-name>hive-env template</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.heapsize</name>
+    <value>512</value>
+    <description>Hive Java heap size</description>
+    <display-name>HiveServer2 Heap Size</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>2048</maximum>
+        <minimum>512</minimum>
+        <unit>MB</unit>
+        <overridable>false</overridable>
+        <increment-step>512</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_exec_orc_storage_strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the encoding strategy to use while writing data. Changing this will only affect the lightweight encoding for integers.
+      This flag will not change the compression level of higher-level compression codecs (like ZLIB). Possible options are SPEED and COMPRESSION.
+    </description>
+    <display-name>ORC Storage Strategy</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>SPEED</value>
+                <label>Speed</label>
+            </entry>
+            <entry>
+                <value>COMPRESSION</value>
+                <label>Compression</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.exec.orc.encoding.strategy</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.exec.orc.compression.strategy</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive_txn_acid</name>
+    <value>off</value>
+    <display-name>ACID Transactions</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>on</value>
+                <label>On</label>
+            </entry>
+            <entry>
+                <value>off</value>
+                <label>Off</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.enforce.bucketing</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.exec.dynamic.partition.mode</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.support.concurrency</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.txn.manager</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.compactor.initiator.on</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.compactor.worker.threads</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive_security_authorization</name>
+    <value>None</value>
+    <description>
+      Authorization mode, default NONE. Options are NONE, Ranger, SQLStdAuth.
+      SQL standard authorization provides grant/revoke functionality at the database and table level.
+      Ranger provides a centralized authorization interface for Hive and provides more granular
+      access control at column level through the Hive plugin.
+    </description>
+    <display-name>Choose Authorization</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>None</value>
+                <label>None</label>
+            </entry>
+            <entry>
+                <value>SQLStdAuth</value>
+                <label>SQLStdAuth</label>
+            </entry>
+            <entry>
+                <value>Ranger</value>
+                <label>Ranger</label>
+            </entry>
+        </entries>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>ranger-hive-plugin-enabled</name>
+            <type>ranger-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.server2.enable.doAs</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authenticator.manager</name>
+            <type>hiveserver2-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.enable.doAs</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authorization.enabled</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.conf.restricted.list</name>
+            <type>hiveserver2-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authenticator.manager</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authorization.manager</name>
+            <type>hiveserver2-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authorization.manager</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.authorization.enabled</name>
+            <type>hiveserver2-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.security.metastore.authorization.manager</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive_timeline_logging_enabled</name>
+    <value>true</value>
+    <display-name>Use ATS Logging</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.exec.pre.hooks</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.exec.post.hooks</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.exec.failure.hooks</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.client.heapsize</name>
+    <value>512</value>
+    <description>Hive Client Java heap size</description>
+    <display-name>Client Heap Size</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>2048</maximum>
+        <minimum>512</minimum>
+        <unit>MB</unit>
+        <overridable>false</overridable>
+        <increment-step>512</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.heapsize</name>
+    <value>1024</value>
+    <description>Hive Metastore Java heap size</description>
+    <display-name>Metastore Heap Size</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>2048</maximum>
+        <minimum>512</minimum>
+        <unit>MB</unit>
+        <increment-step>512</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+    <display-name>Hive Database Type</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_database</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_database</name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+    <display-name>Hive Database</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive_database_type</name>
+            <type>hive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>javax.jdo.option.ConnectionURL</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>javax.jdo.option.ConnectionDriverName</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>datanucleus.rdbms.datastoreAdapterClassName</name>
+            <type>hive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>database</type>
+        <visible>false</visible>
+        <overridable>false</overridable>
+        <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+    <display-name>Hive Log Dir</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>directory</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+    <display-name>Hive PID Dir</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>directory</type>
+        <overridable>false</overridable>
+        <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_user</name>
+    <value>hive</value>
+    <description>Hive User.</description>
+    <display-name>Hive User</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type>USER</property-type>
+    <value-attributes>
+        <type>user</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>ranger.plugins.hive.serviceuser</name>
+            <type>ranger-admin-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>ranger.kms.service.user.hive</name>
+            <type>ranger-admin-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+    <display-name>WebHCat Log Dir</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>directory</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+    <display-name>WebHCat Pid Dir</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>directory</type>
+        <overridable>false</overridable>
+        <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+    <display-name>HCat User</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type>USER</property-type>
+    <value-attributes>
+        <type>user</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+    <display-name>WebHCat User</display-name>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type>USER</property-type>
+    <value-attributes>
+        <type>user</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_user_nofile_limit</name>
+    <value>32000</value>
+    <description>Max open files limit setting for HIVE user.</description>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive_user_nproc_limit</name>
+    <value>16000</value>
+    <description>Max number of processes limit setting for HIVE user.</description>
+    <filename>hive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
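
The heap-related properties above (hive.heapsize, hive.metastore.heapsize, hive.client.heapsize) are what ultimately fill the {{hive_heapsize}} and {{hive_metastore_heapsize}} placeholders in the hive-env.sh template. A small hedged sketch of that mapping, reading straight from the agent's command JSON with assumed helper names:

    def heap_settings(command_json):
      """Pulls the hive-env heap sizes (in MB) out of the agent's command JSON."""
      hive_env = command_json['configurations']['hive-env']
      return {
          # used when $SERVICE is HiveServer2 or a client
          'hive_heapsize': int(hive_env.get('hive.heapsize', 512)),
          # used when $SERVICE is the metastore
          'hive_metastore_heapsize': int(hive_env.get('hive.metastore.heapsize', 1024)),
          'hive_client_heapsize': int(hive_env.get('hive.client.heapsize', 512)),
      }

    # Example (illustrative):
    #   settings = heap_settings(json.load(open('command.json')))
    #   settings['hive_metastore_heapsize']  -> 1024 with the defaults above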

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100755
index 0000000..538334c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,96 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+
+hive.log.threshold=ALL
+hive.root.logger=INFO,FA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.query.id=hadoop
+hive.log.file=${hive.query.id}.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=${hive.log.threshold}
+
+#
+# File Appender
+#
+
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,FA
+log4j.category.Datastore=ERROR,FA
+log4j.category.Datastore.Schema=ERROR,FA
+log4j.category.JPOX.Datastore=ERROR,FA
+log4j.category.JPOX.Plugin=ERROR,FA
+log4j.category.JPOX.MetaData=ERROR,FA
+log4j.category.JPOX.Query=ERROR,FA
+log4j.category.JPOX.General=ERROR,FA
+log4j.category.JPOX.Enhancer=ERROR,FA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
+
+    </value>
+    <description>Custom hive-exec-log4j</description>
+    <display-name>hive-exec-log4j template</display-name>
+    <filename>hive-exec-log4j.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
new file mode 100755
index 0000000..c818d43
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-exec-log4j2.xml
@@ -0,0 +1,83 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveExecLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = FA
+property.hive.query.id = hadoop
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = ${sys:hive.query.id}.log
+
+# list of all appenders
+appenders = console, FA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# simple file appender
+appender.FA.type = File
+appender.FA.name = FA
+appender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+appender.FA.layout.type = PatternLayout
+appender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+  </value>
+    <description>Custom hive-exec-log4j2.properties</description>
+    <display-name>hive-exec-log4j2 template</display-name>
+    <filename>hive-exec-log4j2.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
new file mode 100755
index 0000000..7035283
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-env.xml
@@ -0,0 +1,373 @@
+<configuration><property require-input="false">
+    <name>enable_hive_interactive</name>
+    <value>false</value>
+    <description>Enable or disable interactive query in this cluster.</description>
+    <display-name>Enable Interactive Query (Tech Preview)</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <overridable>false</overridable>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>Yes</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>No</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.num.executors</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_heap_size</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.yarn.container.mb</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>capacity-scheduler</name>
+            <type>capacity-scheduler</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.io.memory.size</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>num_llap_nodes</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>slider_am_container_mb</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive_server_interactive_host</name>
+    <value>localhost</value>
+    <description>HiveServer2 Interactive Host</description>
+    <display-name>HiveServer2 Interactive Host</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap_queue_capacity</name>
+    <value>0</value>
+    <description>Percentage of the cluster dedicated to interactive query.</description>
+    <display-name>% of Cluster Capacity</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>100</maximum>
+        <minimum>20</minimum>
+        <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.llap.daemon.num.executors</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_heap_size</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.yarn.container.mb</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>capacity-scheduler</name>
+            <type>capacity-scheduler</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.io.memory.size</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>num_llap_nodes</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>slider_am_container_mb</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>num_llap_nodes</name>
+    <value>1</value>
+    <description>The number of Hive LLAP daemons to run.</description>
+    <display-name>Number of LLAP Daemons</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <minimum>1</minimum>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>num_retries_for_checking_llap_status</name>
+    <value>10</value>
+    <description>After starting the LLAP app, the number of times to retry the LLAP status check before starting HiveServer2.</description>
+    <display-name>Number of retries while checking LLAP app status</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>20</maximum>
+        <minimum>0</minimum>
+        <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap_heap_size</name>
+    <value>0</value>
+    <description>Heap Size used by LLAP app.</description>
+    <display-name>LLAP heap size</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-site</type>
+        </property>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>slider_am_container_mb</name>
+    <value>341</value>
+    <description>Slider's app master container size in MB.</description>
+    <display-name>Slider AM container size</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap_log_level</name>
+    <value>INFO</value>
+    <description>LLAP app logging level (WARN/INFO/DEBUG/TRACE)</description>
+    <display-name>LLAP app logging level (WARN/INFO/DEBUG/TRACE)</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap_app_name</name>
+    <value>llap0</value>
+    <description>LLAP app name</description>
+    <display-name>LLAP app name</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap_java_opts</name>
+    <value>-XX:+AlwaysPreTouch {% if java_version &gt; 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}</value>
+    <description>Java opts for llap application</description>
+    <display-name>LLAP app java opts</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>content</name>
+    <value>
+      if [ "$SERVICE" = "cli" ]; then
+      if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+      else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+      fi
+      fi
+
+      # The heap size of the JVM started by the hive shell script can be controlled via:
+
+      if [ "$SERVICE" = "metastore" ]; then
+      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+      else
+      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+      fi
+
+      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+
+      # A larger heap size may be required when running queries over a large number of files or partitions.
+      # By default hive shell scripts use a heap size of 256 MB. A larger heap size would also be
+      # appropriate for hive server (hwi etc).
+
+
+      # Set HADOOP_HOME to point to a specific hadoop install directory
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+      # Hive Configuration Directory can be controlled by:
+      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+
+      # Add additional hcatalog jars
+      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+      else
+      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+      fi
+
+      export METASTORE_PORT={{hive_metastore_port}}
+
+      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+      export HIVE_SKIP_SPARK_ASSEMBLY=true
+
+    </value>
+    <description>This is the Jinja template for the hive-env.sh file</description>
+    <display-name>hive-interactive-env template</display-name>
+    <filename>hive-interactive-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
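
Aside: the hive-interactive-env "content" block above and the llap_java_opts value are Jinja
templates -- placeholders such as {{hive_heapsize}} and conditionals such as
{% if java_version > 7 %} are substituted before the file is written to disk. The sketch below
shows that substitution with the stock jinja2 library and a made-up variable value
(java_version=8 is an assumption for illustration only); the real rendering is done by Ambari's
resource_management templating, not by this snippet.

    # Illustrative only: render an Ambari-style Jinja template with jinja2.
    # The variable name java_version mirrors the placeholder used above; the
    # value passed to render() is an example, not an Ambari default.
    from jinja2 import Template

    llap_java_opts_tmpl = (
        "-XX:+AlwaysPreTouch "
        "{% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m"
        "{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+UseParallelGC{% endif %}"
    )

    print(Template(llap_java_opts_tmpl).render(java_version=8))
    # -> -XX:+AlwaysPreTouch -XX:+UseG1GC -XX:TLABSize=8m
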


[49/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100755
index c1f2a98..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,2796 +0,0 @@
-<configuration><property require-input="false">
-    <name>hive.default.fileformat.managed</name>
-    <value>TextFile</value>
-    <description>
-      Default file format for CREATE TABLE statement applied to managed tables only. 
-      External tables will be created with default file format. Leaving this null 
-      will result in using the default file format for all tables.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>datanucleus.rdbms.datastoreAdapterClassName</name>
-    <description>DataNucleus adapter class. This property is used only when the Hive database is SQL Anywhere.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_database</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.minThreads</name>
-    <value>1</value>
-    <description>
-      Minimum number of threads maintained by Atlas hook.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>atlas.hook.hive.maxThreads</name>
-    <value>1</value>
-    <description>
-      Maximum number of threads used by Atlas hook.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cbo.enable</name>
-    <value>true</value>
-    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
-    <display-name>Enable Cost Based Optimizer</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>On</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>Off</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.compute.query.using.stats</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.stats.fetch.partition.stats</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.stats.fetch.column.stats</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.zookeeper.quorum</name>
-    <value>localhost:2181</value>
-    <description>List of ZooKeeper servers to talk to. This is needed for: 1.
-      Read/write locks - when hive.lock.manager is set to
-      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
-      2. When HiveServer2 supports service discovery via Zookeeper.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>multiLine</type>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.connect.retries</name>
-    <value>24</value>
-    <description>Number of retries while opening a connection to metastore</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.failure.retries</name>
-    <value>24</value>
-    <description>Number of retries upon failure of Thrift metastore calls</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.client.connect.retry.delay</name>
-    <value>5s</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Number of seconds for the client to wait between consecutive connection attempts
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>1800s</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      MetaStore Client socket timeout in seconds
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description></description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
-    <description>
-      The Hive client authorization manager class name. The user defined authorization class should implement
-      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cluster.delegation.token.store.class</name>
-    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
-    <description>The delegation token store implementation.
-      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
-    <value>localhost:2181</value>
-    <description>The ZooKeeper token store connect string.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.support.dynamic.service.discovery</name>
-    <value>true</value>
-    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
-      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself,
-      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum
-      in their connection string.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.scratchdir</name>
-    <value>/tmp/hive</value>
-    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.submitviachild</name>
-    <value>false</value>
-    <description></description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.submit.local.task.via.child</name>
-    <value>true</value>
-    <description>
-      Determines whether local tasks (typically the mapjoin hashtable generation phase) run in a
-      separate JVM (true recommended) or not.
-      Avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.compress.output</name>
-    <value>false</value>
-    <description>
-      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed.
-      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.compress.intermediate</name>
-    <value>false</value>
-    <description>
-      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
-      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.reducers.bytes.per.reducer</name>
-    <value>67108864</value>
-    <description>Defines the size per reducer. For example, if it is set to 64M, given 256M input size, 4 reducers will be used.</description>
-    <display-name>Data per Reducer</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>4294967296</maximum>
-        <minimum>64</minimum>
-        <unit>B</unit>
-        <increment-step></increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.reducers.max</name>
-    <value>1009</value>
-    <description>
-      Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
-      negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.pre.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of pre-execution hooks to be invoked for each statement.
-      A pre-execution hook is specified as the name of a Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_timeline_logging_enabled</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.post.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of post-execution hooks to be invoked for each statement.
-      A post-execution hook is specified as the name of a Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>atlas.server.https.port</name>
-            <type>application-properties</type>
-        </property>
-        <property>
-            <name>atlas.server.http.port</name>
-            <type>application-properties</type>
-        </property>
-        <property>
-            <name>hive_timeline_logging_enabled</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.failure.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of on-failure hooks to be invoked for each statement.
-      An on-failure hook is specified as the name of Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_timeline_logging_enabled</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.parallel</name>
-    <value>false</value>
-    <description>Whether to execute jobs in parallel</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.parallel.thread.number</name>
-    <value>8</value>
-    <description>How many jobs at most can be executed in parallel</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on. </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.dynamic.partition</name>
-    <value>true</value>
-    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.dynamic.partition.mode</name>
-    <value>nonstrict</value>
-    <description>
-      In strict mode, the user must specify at least one static partition
-      in case the user accidentally overwrites all partitions.
-      NonStrict allows all partitions of a table to be dynamic.
-    </description>
-    <display-name>Allow all partitions to be Dynamic</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>nonstrict</value>
-                <label>On</label>
-            </entry>
-            <entry>
-                <value>strict</value>
-                <label>Off</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_txn_acid</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.max.dynamic.partitions</name>
-    <value>5000</value>
-    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.max.dynamic.partitions.pernode</name>
-    <value>2000</value>
-    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.max.created.files</name>
-    <value>100000</value>
-    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-    <display-name>Database Password</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type>PASSWORD</property-type>
-    <value-attributes>
-        <type>password</type>
-        <overridable>false</overridable>
-        <hidden>HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD</hidden>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-    <display-name>Database URL</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_database</name>
-            <type>hive-env</type>
-        </property>
-        <property>
-            <name>ambari.hive.db.schema.name</name>
-            <type>hive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.server.max.threads</name>
-    <value>100000</value>
-    <description>Maximum number of worker threads in the Thrift server's pool.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/security/keytabs/hive.service.keytab</value>
-    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/_HOST@EXAMPLE.COM</value>
-    <description>
-      The service principal for the metastore Thrift server.
-      The special string _HOST will be replaced automatically with the correct host name.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
-    <value>/hive/cluster/delegation</value>
-    <description>The root path for token store data.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>List of comma separated listeners for metastore events.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.authorization.storage.checks</name>
-    <value>false</value>
-    <description>
-      Should the metastore do authorization checks against the underlying storage (usually hdfs)
-      for operations like drop-partition (disallow the drop-partition if the user in
-      question doesn't have permissions to delete the corresponding directory
-      on the storage).
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-    <display-name>JDBC Driver Class</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_database</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-    <display-name>Database Username</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>db_user</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cbo.enable</name>
-    <value>true</value>
-    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.mapjoin.optimized.hashtable</name>
-    <value>true</value>
-    <description>
-      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
-      because memory-optimized hashtable cannot be serialized.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.smbjoin.cache.rows</name>
-    <value>10000</value>
-    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.map.aggr.hash.percentmemory</name>
-    <value>0.5</value>
-    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
-    <value>0.9</value>
-    <description>
-      The max memory to be used by map-side group aggregation hash table.
-      If the memory usage is higher than this number, force a flush of the data.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.map.aggr.hash.min.reduction</name>
-    <value>0.5</value>
-    <description>
-      Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number.
-      Set to 1 to make sure hash aggregation is never turned off.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.mapfiles</name>
-    <value>true</value>
-    <description>Merge small files at the end of a map-only job</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.mapredfiles</name>
-    <value>false</value>
-    <description>Merge small files at the end of a map-reduce job</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.tezfiles</name>
-    <value>false</value>
-    <description>Merge small files at the end of a Tez DAG</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.size.per.task</name>
-    <value>256000000</value>
-    <description>Size of merged files at the end of the job</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.smallfiles.avgsize</name>
-    <value>16000000</value>
-    <description>
-      When the average output file size of a job is less than this number, Hive will start an additional
-      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
-      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.rcfile.block.level</name>
-    <value>true</value>
-    <description></description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.merge.orcfile.stripe.level</name>
-    <value>true</value>
-    <description>
-      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
-      table with ORC file format, enabling this config will do stripe level fast merge
-      for small ORC files. Note that enabling this config will not honor padding tolerance
-      config (hive.exec.orc.block.padding.tolerance).
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.orc.default.stripe.size</name>
-    <value>67108864</value>
-    <description>Define the default ORC stripe size</description>
-    <display-name>Default ORC Stripe Size</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>268435456</maximum>
-        <minimum>8388608</minimum>
-        <unit>B</unit>
-        <increment-step>8388608</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.exec.orc.default.compress</name>
-    <value>ZLIB</value>
-    <description>Define the default compression codec for ORC file</description>
-    <display-name>ORC Compression Algorithm</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>ZLIB</value>
-                <label>zlib Compression Library</label>
-            </entry>
-            <entry>
-                <value>SNAPPY</value>
-                <label>Snappy Compression Library</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.orc.splits.include.file.footer</name>
-    <value>false</value>
-    <description>
-      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
-      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.orc.compute.splits.num.threads</name>
-    <value>10</value>
-    <description>How many threads orc should use to create splits in parallel.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>
-      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
-      If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.limit.optimize.enable</name>
-    <value>true</value>
-    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.cpu.vcores</name>
-    <value>-1</value>
-    <description>By default Tez will ask for however many CPUs map-reduce is configured to use per container. This can be used to override that value.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.log.level</name>
-    <value>INFO</value>
-    <description>
-      The log level to use for tasks executing as part of the DAG.
-      Used only if hive.tez.java.opts is used to configure Java options.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-    <display-name>Enforce bucketing</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_txn_acid</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.enforce.sortmergebucketmapjoin</name>
-    <value>true</value>
-    <description>If the user asked for a sort-merge bucketed map-side join and it cannot be performed, should the query fail or not?</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
-    <value>false</value>
-    <description>
-      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
-      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
-      tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the
-      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
-      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
-      and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
-      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
-      if the complete small table can fit in memory, and a map-join can be performed.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.constant.propagation</name>
-    <value>true</value>
-    <description>Whether to enable constant propagation optimizer</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.metadataonly</name>
-    <value>true</value>
-    <description></description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.null.scan</name>
-    <value>true</value>
-    <description>Don't scan relations that are guaranteed not to generate any rows</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>Whether to try bucket mapjoin</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>
-      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-      This should always be set to true. Since it is a new feature, it has been made configurable.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>4</value>
-    <description>
-      Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
-      The optimization will be automatically disabled if the number of reducers would be less than the specified value.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.optimize.sort.dynamic.partition</name>
-    <value>false</value>
-    <description>
-      When enabled, the dynamic partitioning column will be globally sorted.
-      This way we can keep only one record writer open for each partition value
-      in the reducer thereby reducing the memory pressure on reducers.
-    </description>
-    <display-name>Sort Partitions Dynamically</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.stats.autogather</name>
-    <value>true</value>
-    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.stats.dbclass</name>
-    <value>fs</value>
-    <description>
-      Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
-      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.stats.fetch.partition.stats</name>
-    <value>true</value>
-    <description>
-      Annotation of operator tree with statistics information requires partition level basic
-      statistics like number of rows, data size and file size. Partition statistics are fetched from
-      metastore. Fetching partition statistics for each needed partition can be expensive when the
-      number of partitions is high. This flag can be used to disable fetching of partition statistics
-      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
-      and will estimate the number of rows from row schema.
-    </description>
-    <display-name>Fetch partition stats at compiler</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>On</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>Off</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive.cbo.enable</name>
-            <type>hive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.stats.fetch.column.stats</name>
-    <value>false</value>
-    <description>
-      Annotation of operator tree with statistics information requires column statistics.
-      Column statistics are fetched from metastore. Fetching column statistics for each needed column
-      can be expensive when the number of columns is high. This flag can be used to disable fetching
-      of column statistics from metastore.
-    </description>
-    <display-name>Fetch column stats at compiler</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>On</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>Off</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive.cbo.enable</name>
-            <type>hive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.zookeeper.client.port</name>
-    <value>2181</value>
-    <description>The port of ZooKeeper servers to talk to. If the list of ZooKeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.zookeeper.namespace</name>
-    <value>hive_zookeeper_namespace</value>
-    <description>The parent node under which all ZooKeeper nodes are created.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.txn.manager</name>
-    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
-    <description></description>
-    <display-name>Transaction Manager</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
-                <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
-            </entry>
-            <entry>
-                <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
-                <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_txn_acid</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.txn.max.open.batch</name>
-    <value>1000</value>
-    <description>
-      Maximum number of transactions that can be fetched in one call to open_txns().
-      Increasing this will decrease the number of delta files created when
-      streaming data into Hive.  But it will also increase the number of
-      open transactions at any given time, possibly impacting read performance.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.support.concurrency</name>
-    <value>false</value>
-    <description>
-      Support concurrency and use locks, needed for Transactions. Requires Zookeeper.
-    </description>
-    <display-name>Use Locking</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_txn_acid</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.cli.print.header</name>
-    <value>false</value>
-    <description>
-      Whether to print the names of the columns in query output.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.compactor.worker.timeout</name>
-    <value>86400L</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Time before a given compaction in working state is declared a failure
-      and returned to the initiated state.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.compactor.check.interval</name>
-    <value>300L</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Time between checks to see if any partitions need to be compacted.
-      This should be kept high because each check for compaction requires many calls against the NameNode.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.compactor.delta.pct.threshold</name>
-    <value>0.1f</value>
-    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.fetch.task.conversion</name>
-    <value>more</value>
-    <description>
-      Expects one of [none, minimal, more].
-      Some select queries can be converted to single FETCH task minimizing latency.
-      Currently the query should be single-sourced, without any subquery, and should not have
-      any aggregations or distincts (which incur an RS), lateral views or joins.
-      0. none : disable hive.fetch.task.conversion
-      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
-      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.fetch.task.conversion.threshold</name>
-    <value>1073741824</value>
-    <description>
-      Input threshold for applying hive.fetch.task.conversion. If target table is native, input length
-      is calculated by summation of file lengths. If it's not native, storage handler for the table
-      can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.fetch.task.aggr</name>
-    <value>false</value>
-    <description>
-      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
-      final aggregations in a single reduce task. If this is set to true, Hive delegates the final aggregation
-      stage to a fetch task, possibly decreasing the query time.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>Enable or disable Hive client authorization.</description>
-    <display-name>Enable Authorization</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>
-      Hive client authenticator manager class name. The user-defined authenticator should implement
-      the interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>
-      Authorization manager class name to be used in the metastore for authorization.
-      The user-defined authorization class should implement the interface
-      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
-    </description>
-    <display-name>Hive Authorization Manager</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.metastore.authorization.auth.reads</name>
-    <value>true</value>
-    <description>If this is true, the metastore authorizer authorizes read actions on databases and tables.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.metastore.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
-    <description>
-      Authenticator manager class name to be used in the metastore for authentication.
-      The user-defined authenticator should implement the interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.logging.operation.enabled</name>
-    <value>true</value>
-    <description>When true, HS2 will save operation logs</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.logging.operation.log.location</name>
-    <value>/tmp/hive/operation_logs</value>
-    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.zookeeper.namespace</name>
-    <value>hiveserver2</value>
-    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.http.port</name>
-    <value>10001</value>
-    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.http.path</name>
-    <value>cliservice</value>
-    <description>Path component of URL endpoint when in HTTP mode.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.port</name>
-    <value>10000</value>
-    <description>
-      TCP port number to listen on, default 10000.
-    </description>
-    <display-name>HiveServer2 Port</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.sasl.qop</name>
-    <value>auth</value>
-    <description>
-      Expects one of [auth, auth-int, auth-conf].
-      SASL QOP value; set it to one of the following values to enable higher levels of
-      protection for HiveServer2 communication with clients.
-      "auth" - authentication only (default)
-      "auth-int" - authentication plus integrity protection
-      "auth-conf" - authentication plus integrity and confidentiality protection
-      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.thrift.max.worker.threads</name>
-    <value>500</value>
-    <description>Maximum number of Thrift worker threads</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.allow.user.substitution</name>
-    <value>true</value>
-    <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.authentication.spnego.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>
-      keytab file for SPNego principal, optional,
-      typical value would look like /etc/security/keytabs/spnego.service.keytab,
-      This keytab would be used by HiveServer2 when Kerberos security is enabled and
-      HTTP transport mode is used.
-      This needs to be set only if SPNEGO is to be used in authentication.
-      SPNego authentication would be honored only if valid
-      hive.server2.authentication.spnego.principal
-      and
-      hive.server2.authentication.spnego.keytab
-      are specified.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.authentication</name>
-    <value>NONE</value>
-    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
-    <display-name>HiveServer2 Authentication</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>NONE</value>
-                <label>None</label>
-            </entry>
-            <entry>
-                <value>LDAP</value>
-                <label>LDAP</label>
-            </entry>
-            <entry>
-                <value>KERBEROS</value>
-                <label>Kerberos</label>
-            </entry>
-            <entry>
-                <value>PAM</value>
-                <label>PAM</label>
-            </entry>
-            <entry>
-                <value>CUSTOM</value>
-                <label>Custom</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.server2.custom.authentication.class</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.authentication.kerberos.principal</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.authentication.kerberos.keytab</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.authentication.ldap.url</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.authentication.ldap.baseDN</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.authentication.pam.services</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.server2.authentication.spnego.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      SPNego service principal, optional,
-      typical value would look like HTTP/_HOST@EXAMPLE.COM
-      SPNego service principal would be used by HiveServer2 when Kerberos security is enabled
-      and HTTP transport mode is used.
-      This needs to be set only if SPNEGO is to be used in authentication.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>
-      Setting this property to true will have HiveServer2 execute
-      Hive operations as the user making the calls to it.
-    </description>
-    <display-name>Run as end user instead of Hive user</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.table.type.mapping</name>
-    <value>CLASSIC</value>
-    <description>
-      Expects one of [classic, hive].
-      This setting reflects how HiveServer2 will report the table types for JDBC and other
-      client implementations that retrieve the available tables and supported table types
-      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
-      CLASSIC : More generic types like TABLE and VIEW
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.server2.use.SSL</name>
-    <value>false</value>
-    <description>
-      Set this to true to use SSL encryption in HiveServer2.
-    </description>
-    <display-name>Use SSL</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.conf.restricted.list</name>
-    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
-    <description>Comma separated list of configuration options which are immutable at runtime</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.user.install.directory</name>
-    <value>/user/</value>
-    <description>
-      If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory",
-      it will upload the hive jar to "hive.user.install.directory/user.name"
-      and use it to run queries.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.vectorized.groupby.maxentries</name>
-    <value>100000</value>
-    <description>
-      Max number of entries in the vector group-by aggregation hashtables.
-      Exceeding this will trigger a flush regardless of the memory pressure condition.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.prewarm.enabled</name>
-    <value>false</value>
-    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
-    <display-name>Hold Containers to Reduce Latency</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.prewarm.numcontainers</name>
-    <value>3</value>
-    <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
-    <display-name>Number of Containers Held</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>20</maximum>
-        <minimum>1</minimum>
-        <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.convert.join.bucket.mapjoin.tez</name>
-    <value>false</value>
-    <description>
-      Whether joins can be automatically converted to bucket map joins in hive
-      when tez is used as the execution engine.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.auto.reducer.parallelism</name>
-    <value>false</value>
-    <description>
-      Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes
-      and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
-      necessary.
-    </description>
-    <display-name>Allow dynamic numbers of reducers</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.max.partition.factor</name>
-    <value>2.0</value>
-    <description>When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.min.partition.factor</name>
-    <value>0.25</value>
-    <description>
-      When auto reducer parallelism is enabled, this factor will be used to put a lower limit on the number
-      of reducers that Tez specifies.
-    </description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.dynamic.partition.pruning</name>
-    <value>true</value>
-    <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
-    <display-name>Allow dynamic partition pruning</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
-    <value>1048576</value>
-    <description>Maximum size of events sent by processors in dynamic pruning. If this size is exceeded, no pruning will take place.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
-    <value>104857600</value>
-    <description>Maximum total data size of events in dynamic pruning.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.tez.smb.number.waves</name>
-    <value>0.5</value>
-    <description>The number of waves in which to run the SMB join. Accounts for the cluster being occupied. Ideally this should be 1 wave.</description>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used for the Hive Metastore.</description>
-    <display-name>Database Name</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>database</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>javax.jdo.option.ConnectionURL</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.vectorized.execution.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable vectorized mode of query execution.
-      The default value is false.
-    </description>
-    <display-name>Enable Vectorization and Map Vectorization</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>52428800</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of the sizes of n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task).
-    </description>
-    <display-name>For Map Join, per Map memory threshold</display-name>
-    <filename>hive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>17179869184</maximum>
-   

<TRUNCATED>
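
The hive-site.xml fragment above pairs hive.txn.manager with hive.support.concurrency (both depend on hive_txn_acid in hive-env): DbTxnManager is only meaningful when locking is enabled. As a rough illustration only, not part of this patch, here is a minimal Python sketch assuming a plain dict shaped like Ambari's config['configurations']['hive-site']; the helper name and the dict literal are hypothetical.

# Illustrative only: relate the ACID hive-site values shown in the removed fragment.
def hive_acid_enabled(hive_site):
    """Return True when the hive-site dict describes an ACID-capable setup."""
    txn_manager = hive_site.get(
        'hive.txn.manager',
        'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager')
    concurrency = str(hive_site.get('hive.support.concurrency', 'false')).lower()
    # DbTxnManager without hive.support.concurrency=true is not a working ACID setup.
    return txn_manager.endswith('DbTxnManager') and concurrency == 'true'

# Defaults from the removed fragment: DummyTxnManager + concurrency off -> False.
print(hive_acid_enabled({
    'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
    'hive.support.concurrency': 'false',
}))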

[40/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
deleted file mode 100755
index 81a4e3e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_hive(upgrade_type = None):
-  import params
-
-  if params.has_ranger_admin:
-
-    stack_version = None
-
-    if upgrade_type is not None:
-      stack_version = params.version
-
-    if params.retryAble:
-      Logger.info("Hive: Setup ranger: command retry enabled thus retrying if ranger admin is down !")
-    else:
-      Logger.info("Hive: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    if params.xml_configurations_supported and params.enable_ranger_hive and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/hiveServer2",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hive_user,
-                         group=params.hive_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-    if params.xml_configurations_supported:
-      api_version=None
-      if params.stack_supports_ranger_kerberos:
-        api_version='v2'
-      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-      setup_ranger_plugin('hive-server2', 'hive', params.ranger_previous_jdbc_jar,
-                          params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
-                          params.ranger_driver_curl_target, params.java64_home,
-                          params.repo_name, params.hive_ranger_plugin_repo,
-                          params.ranger_env, params.ranger_plugin_properties,
-                          params.policy_user, params.policymgr_mgr_url,
-                          params.enable_ranger_hive, conf_dict=params.hive_server_conf_dir,
-                          component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hiveServer2'],
-                          plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
-                          plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
-                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
-                          component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
-                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
-                          is_security_enabled = params.security_enabled,
-                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                          component_user_principal=params.hive_principal if params.security_enabled else None,
-                          component_user_keytab=params.hive_server2_keytab if params.security_enabled else None)
-    else:
-      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
-      setup_ranger_plugin('hive-server2', 'hive', params.ranger_previous_jdbc_jar,
-                        params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
-                        params.ranger_driver_curl_target, params.java64_home,
-                        params.repo_name, params.hive_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_hive, conf_dict=params.hive_server_conf_dir,
-                        component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hiveServer2'],
-                        plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
-                        component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
-  else:
-    Logger.info('Ranger admin not installed')
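
For orientation (not part of the patch): the removed setup_ranger_hive() creates the HDFS audit locations below when xa_audit_hdfs_is_enabled is set, and passes skip_if_rangeradmin_down = not retryAble, so plugin setup is skipped when Ranger admin is unreachable unless command retry is enabled. A small sketch summarizing just the directory conventions; the owner column names the params attribute used, not a literal user name.

# Illustrative summary of the HDFS audit directories created above.
RANGER_AUDIT_DIRS = [
    ("/ranger/audit",             "hdfs_user", 0o755),
    ("/ranger/audit/hiveServer2", "hive_user", 0o700),
]

for path, owner, mode in RANGER_AUDIT_DIRS:
    print("%-28s owner=%s mode=%o" % (path, owner, mode))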

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
deleted file mode 100755
index 0b5d5db..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_hive_interactive(upgrade_type = None):
-  import params
-
-  if params.has_ranger_admin:
-
-    stack_version = None
-
-    if upgrade_type is not None:
-      stack_version = params.version
-
-    if params.retryAble:
-      Logger.info("Hive2: Setup ranger: command retry enabled thus retrying if ranger admin is down !")
-    else:
-      Logger.info("Hive2: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    if params.xml_configurations_supported and params.enable_ranger_hive and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/hive2",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hive_user,
-                         group=params.hive_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-      setup_ranger_plugin('hive-server2-hive2', 'hive', params.ranger_previous_jdbc_jar,
-                          params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
-                          params.ranger_driver_curl_target, params.java64_home,
-                          params.repo_name, params.hive_ranger_plugin_repo,
-                          params.ranger_env, params.ranger_plugin_properties,
-                          params.policy_user, params.policymgr_mgr_url,
-                          params.enable_ranger_hive, conf_dict=params.hive_server_interactive_conf_dir,
-                          component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hive-server2-hive2'],
-                          plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
-                          plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
-                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
-                          component_list=['hive-client', 'hive-metastore', 'hive-server2','hive-server2-hive2'], audit_db_is_enabled=False,
-                          credential_file=params.credential_file, xa_audit_db_password=None,
-                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version='v2',
-                          is_security_enabled = params.security_enabled,
-                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                          component_user_principal=params.hive_principal if params.security_enabled else None,
-                          component_user_keytab=params.hive_server2_keytab if params.security_enabled else None)
-
-  else:
-    Logger.info('Ranger admin not installed')
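
The interactive variant above differs from setup_ranger_hive.py mainly in its component name, audit directory, and Ranger API usage. A small sketch, for illustration only, capturing those differences as data (the variable name is hypothetical).

# Illustrative comparison of the two removed Ranger setup paths.
RANGER_PLUGIN_VARIANTS = {
    'hive-server2': {
        'audit_dir': '/ranger/audit/hiveServer2',
        'cache_service_list': ['hiveServer2'],
        'api_version': None,   # 'v2' only when the stack supports Ranger Kerberos
        'audit_db_supported': True,
    },
    'hive-server2-hive2': {
        'audit_dir': '/ranger/audit/hive2',
        'cache_service_list': ['hive-server2-hive2'],
        'api_version': 'v2',   # always v2; HDFS audit only (audit_db_is_enabled=False)
        'audit_db_supported': False,
    },
}

for component, opts in sorted(RANGER_PLUGIN_VARIANTS.items()):
    print("%s -> %s (api %s)" % (component, opts['audit_dir'], opts['api_version']))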

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
deleted file mode 100755
index a7b2e3f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons import OSCheck
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'HIVE_METASTORE' : 'hive-metastore',
-  'HIVE_SERVER' : 'hive-server2',
-  'WEBHCAT_SERVER' : 'hive-webhcat',
-  'HIVE_CLIENT' : 'hive-client',
-  'HCAT' : 'hive-client',
-  'HIVE_SERVER_INTERACTIVE' : 'hive-server2-hive2'
-}
-
-
-# Either HIVE_METASTORE, HIVE_SERVER, WEBHCAT_SERVER, HIVE_CLIENT, HCAT, HIVE_SERVER_INTERACTIVE
-role = default("/role", None)
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
-component_directory_interactive = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_SERVER_INTERACTIVE")
-
-config = Script.get_config()
-
-stack_root = Script.get_stack_root()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-
-if OSCheck.is_windows_family():
-  hive_metastore_win_service_name = "metastore"
-  hive_client_win_service_name = "hwi"
-  hive_server_win_service_name = "hiveserver2"
-  webhcat_server_win_service_name = "templeton"
-else:
-  hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
-  hive_pid = 'hive-server.pid'
-  hive_interactive_pid = 'hive-interactive.pid'
-  hive_metastore_pid = 'hive.pid'
-
-  hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
-  webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
-
-  process_name = 'mysqld'
-  if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
-    daemon_name = 'mysql'
-  else:
-    daemon_name = 'mysqld'
-
-  # Security related/required params
-  hostname = config['hostname']
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  tmp_dir = Script.get_tmp_dir()
-  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-  hive_user = config['configurations']['hive-env']['hive_user']
-  webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-  # default configuration directories
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-  hive_etc_dir_prefix = "/etc/hive"
-  hive_interactive_etc_dir_prefix = "/etc/hive2"
-
-  hive_server_conf_dir = "/etc/hive/conf.server"
-  hive_server_interactive_conf_dir = "/etc/hive2/conf.server"
-
-#  webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/conf")
-#  hive_home_dir = format("{stack_root}/current/{component_directory}")
-#  hive_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-#  hive_client_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-  webhcat_conf_dir = '/etc/hive/conf'
-  hive_home_dir = '/usr/lib/hive'
-  hive_conf_dir = '/usr/lib/hive/conf'
-  hive_client_conf_dir = '/etc/hive/conf'
-
-  if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version_formatted_major):
-    hive_server_conf_dir = format("{stack_root}/current/{component_directory}/conf/conf.server")
-    hive_conf_dir = hive_server_conf_dir
-
-  if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, stack_version_formatted_major):
-    # this is NOT a typo. Configs for hcatalog/webhcat point to a
-    # specific directory which is NOT called 'conf'
-    #  FIXME ODPi: webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/etc/webhcat")
-    webhcat_conf_dir = format("/etc/hive-webhcat/conf")
-
-  # if stack version supports hive serve interactive
-  if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, stack_version_formatted_major):
-    hive_server_interactive_conf_dir = format("{stack_root}/current/{component_directory_interactive}/conf/conf.server")
-
-  hive_config_dir = hive_client_conf_dir
-
-  if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE", "HIVE_SERVER_INTERACTIVE"]:
-    hive_config_dir = hive_server_conf_dir
-    
-stack_name = default("/hostLevelParams/stack_name", None)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
deleted file mode 100755
index fe3f34a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
+++ /dev/null
@@ -1,145 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-import os.path
-from resource_management import *
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
-from ambari_commons import OSConst
-from ambari_commons.constants import SERVICE
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def webhcat():
-  import params
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.hcat_config_dir,
-            configurations=params.config['configurations']['webhcat-site']
-  )
-  # Manually overriding service logon user & password set by the installation package
-  ServiceConfig(params.webhcat_server_win_service_name,
-                action="change_user",
-                username = params.hcat_user,
-                password = Script.get_password(params.hcat_user))
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def webhcat():
-  import params
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            create_parents = True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            create_parents = True)
-
-  Directory(params.config_dir,
-            create_parents = True,
-            owner=params.webhcat_user,
-            group=params.user_group,
-            cd_access="a")
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  # Replace _HOST with hostname in relevant principal-related properties
-  webhcat_site = params.config['configurations']['webhcat-site'].copy()
-  for prop_name in ['templeton.hive.properties', 'templeton.kerberos.principal']:
-    if prop_name in webhcat_site:
-      webhcat_site[prop_name] = webhcat_site[prop_name].replace("_HOST", params.hostname)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=webhcat_site,
-            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-            )
-
-  # if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
-  if params.stack_version_formatted_major  and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
-       params.version and params.stack_root:
-    XmlConfig("hive-site.xml",
-      conf_dir = format("{stack_root}/{version}/hive/conf"),
-      configurations = params.config['configurations']['hive-site'],
-      configuration_attributes = params.config['configuration_attributes']['hive-site'],
-      owner = params.hive_user,
-      group = params.user_group,
-      )
-
-    XmlConfig("yarn-site.xml",
-      conf_dir = format("{stack_root}/{version}/hadoop/conf"),
-      configurations = params.config['configurations']['yarn-site'],
-      configuration_attributes = params.config['configuration_attributes']['yarn-site'],
-      owner = params.yarn_user,
-      group = params.user_group,
-      )
-  
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.webhcat_env_sh_template)
-  )
-  
-  Directory(params.webhcat_conf_dir,
-       cd_access='a',
-       create_parents = True
-  )
-
-  log4j_webhcat_filename = 'webhcat-log4j.properties'
-  if params.log4j_webhcat_props is not None:
-    File(format("{config_dir}/{log4j_webhcat_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.webhcat_user,
-         content=params.log4j_webhcat_props
-    )
-  elif os.path.exists(format("{config_dir}/{log4j_webhcat_filename}.template")):
-    File(format("{config_dir}/{log4j_webhcat_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.webhcat_user,
-         content=StaticFile(format("{config_dir}/{log4j_webhcat_filename}.template"))
-    )
-
-  # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
-    # WebHCat uses a different config dir than the rest of the daemons in Hive.
-    atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
-    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
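
For reference, the _HOST rewrite above is a plain string substitution against the local FQDN. A minimal shell sketch of the same idea, using an illustrative principal value rather than one taken from this commit:

    principal='HTTP/_HOST@EXAMPLE.COM'            # illustrative webhcat-site value
    echo "${principal//_HOST/$(hostname -f)}"     # -> HTTP/<fqdn>@EXAMPLE.COM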

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
deleted file mode 100755
index 34687c4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class WebHCatServer(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action='start', upgrade_type=upgrade_type)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    webhcat_service(action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class WebHCatServerWindows(WebHCatServer):
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_windows_service_status(status_params.webhcat_server_win_service_name)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class WebHCatServerDefault(WebHCatServer):
-  def get_component_name(self):
-    return "hive-webhcat"
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.webhcat_pid_file)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing WebHCat Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
-      # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
-      conf_select.select(params.stack_name, "hive-hcatalog", params.version)
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hive-webhcat", params.version)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    if status_params.security_enabled:
-      expectations ={}
-      expectations.update(
-        build_expectations(
-          'webhcat-site',
-          {
-            "templeton.kerberos.secret": "secret"
-          },
-          [
-            "templeton.kerberos.keytab",
-            "templeton.kerberos.principal"
-          ],
-          [
-            "templeton.kerberos.keytab"
-          ]
-        )
-      )
-      expectations.update(
-        build_expectations(
-          'hive-site',
-          {
-            "hive.server2.authentication": "KERBEROS",
-            "hive.metastore.sasl.enabled": "true",
-            "hive.security.authorization.enabled": "true"
-          },
-          None,
-          None
-        )
-      )
-
-      security_params = {}
-      security_params.update(get_params_from_filesystem(status_params.hive_conf_dir,
-                                                        {'hive-site.xml': FILE_TYPE_XML}))
-      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
-                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'webhcat-site' not in security_params \
-            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
-            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.webhcat_user,
-                                security_params['webhcat-site']['templeton.kerberos.keytab'],
-                                security_params['webhcat-site']['templeton.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.hcat_log_dir
-  
-  def get_user(self):
-    import params
-    return params.webhcat_user
-
-if __name__ == "__main__":
-  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
deleted file mode 100755
index c24db4c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.shell import as_user
-from resource_management.core.logger import Logger
-import traceback
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def webhcat_service(action='start', rolling_restart=False):
-  import params
-  if action == 'start' or action == 'stop':
-    Service(params.webhcat_server_win_service_name, action=action)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def webhcat_service(action='start', upgrade_type=None):
-  import params
-
-  environ = {
-    'HADOOP_HOME': params.hadoop_home
-  }
-
-  cmd = format('{webhcat_bin_dir}/webhcat_server.sh')
-
-  if action == 'start':
-    if upgrade_type is not None and params.version and params.stack_root:
-      environ['HADOOP_HOME'] = format("{stack_root}/{version}/hadoop")
-
-    daemon_cmd = format('cd {hcat_pid_dir} ; {cmd} start')
-    no_op_test = as_user(format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1'), user=params.webhcat_user)
-    try:
-      Execute(daemon_cmd,
-              user=params.webhcat_user,
-              not_if=no_op_test,
-              environment = environ)
-    except:
-      show_logs(params.hcat_log_dir, params.webhcat_user)
-      raise
-  elif action == 'stop':
-    try:
-      graceful_stop(cmd, environ)
-    except Fail:
-      show_logs(params.hcat_log_dir, params.webhcat_user)
-      Logger.info(traceback.format_exc())
-
-    pid_expression = "`" + as_user(format("cat {webhcat_pid_file}"), user=params.webhcat_user) + "`"
-    process_id_exists_command = format("ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p {pid_expression} >/dev/null 2>&1")
-    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid_expression}")
-    wait_time = 10
-    Execute(daemon_hard_kill_cmd,
-            not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
-            ignore_failures = True
-    )
-
-    try:
-      # check if stopped the process, else fail the task
-      Execute(format("! ({process_id_exists_command})"),
-              tries=20,
-              try_sleep=3,
-      )
-    except:
-      show_logs(params.hcat_log_dir, params.webhcat_user)
-      raise
-
-    File(params.webhcat_pid_file,
-         action="delete",
-    )
-
-def graceful_stop(cmd, environ):
-  import params
-  daemon_cmd = format('{cmd} stop')
-
-  Execute(daemon_cmd,
-          user = params.webhcat_user,
-          environment = environ)
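
The no_op_test and the delayed hard kill above boil down to a pid-file liveness probe plus a conditional kill -9 after a grace period. A minimal sketch, with a placeholder pid-file path standing in for {webhcat_pid_file}:

    PIDFILE=/var/run/webhcat/webhcat.pid
    alive() { ls "$PIDFILE" >/dev/null 2>&1 && ps -p "$(cat "$PIDFILE")" >/dev/null 2>&1; }
    # kill -9 is issued only if the daemon is still alive after the 10 second grace period
    if alive; then sleep 10 && alive && kill -9 "$(cat "$PIDFILE")"; fi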

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
deleted file mode 100755
index 8e80d48..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import urllib2
-
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-import time
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def webhcat_service_check():
-  Logger.info("Webhcat smoke test - service status")
-
-  import params
-  # AMBARI-11633 [WinTP2] Webhcat service check fails
-  # Hive doesn't pass the environment variables correctly to child processes, which fails the smoke test.
-  # Reducing the number of URLs checked to the minimum required.
-  #smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
-  #service = "WEBHCAT"
-  #Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
-
-  url_tests = [
-    "status",
-    #These are the failing ones:
-    #"ddl/database?user.name=hadoop",
-    #"ddl/database/default/table?user.name=hadoop"
-  ]
-
-
-  import socket
-
-  url_host = socket.getfqdn()
-  url_port = params.config["configurations"]["webhcat-site"]["templeton.port"]
-
-  for url_test in url_tests:
-    url_request = "http://{0}:{1}/templeton/v1/{2}".format(url_host, url_port, url_test)
-    url_response = None
-
-    try:
-      # execute the query for the JSON that includes WebHCat status
-      url_response = urllib2.urlopen(url_request, timeout=30)
-
-      status = url_response.getcode()
-      response = url_response.read()
-
-      if status != 200:
-        Logger.warning("Webhcat service check status: {0}".format(status))
-      Logger.info("Webhcat service check response: {0}".format(response))
-    except urllib2.HTTPError as he:
-      raise Fail("Webhcat check {0} failed: {1}".format(url_request, he.msg))
-    finally:
-      if url_response is not None:
-        try:
-          url_response.close()
-        except:
-          pass
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def webhcat_service_check():
-  import params
-  File(format("{tmp_dir}/templetonSmoke.sh"),
-       content= StaticFile('templetonSmoke.sh'),
-       mode=0755
-  )
-
-  if params.security_enabled:
-    smokeuser_keytab=params.smoke_user_keytab
-    smoke_user_principal=params.smokeuser_principal
-  else:
-    smokeuser_keytab= "no_keytab"
-    smoke_user_principal="no_principal"
-    
-  unique_name = format("{smokeuser}.{timestamp}", timestamp = time.time())
-  templeton_test_script = format("idtest.{unique_name}.pig")
-  templeton_test_input = format("/tmp/idtest.{unique_name}.in")
-  templeton_test_output = format("/tmp/idtest.{unique_name}.out")
-
-  File(format("{tmp_dir}/{templeton_test_script}"),
-       content = Template("templeton_smoke.pig.j2", templeton_test_input=templeton_test_input, templeton_test_output=templeton_test_output),
-       owner=params.hdfs_user
-  )
-  
-  params.HdfsResource(format("/tmp/{templeton_test_script}"),
-                      action = "create_on_execute",
-                      type = "file",
-                      source = format("{tmp_dir}/{templeton_test_script}"),
-                      owner = params.smokeuser
-  )
-  
-  params.HdfsResource(templeton_test_input,
-                      action = "create_on_execute",
-                      type = "file",
-                      source = "/etc/passwd",
-                      owner = params.smokeuser
-  )
-  
-  params.HdfsResource(None, action = "execute")
-
-  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {templeton_port} {templeton_test_script} {smokeuser_keytab}"
-               " {security_param} {kinit_path_local} {smoke_user_principal}"
-               " {tmp_dir}")
-
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-          logoutput=True)
-
-
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
deleted file mode 100755
index e4d88bc..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ /dev/null
@@ -1,54 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_metric_collector %}
-
-  *.period={{metrics_collection_period}}
-  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-  *.sink.timeline.period={{metrics_collection_period}}
-  *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.slave.host.name = {{hostname}}
-
-  # HTTPS properties
-  *.sink.timeline.truststore.path = {{metric_truststore_path}}
-  *.sink.timeline.truststore.type = {{metric_truststore_type}}
-  *.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-  hivemetastore.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-
-
-{% endif %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
deleted file mode 100755
index b5c4891..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ /dev/null
@@ -1,54 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_metric_collector %}
-
-  *.period={{metrics_collection_period}}
-  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-  *.sink.timeline.period={{metrics_collection_period}}
-  *.sink.timeline.sendInterval={{metrics_report_interval}}000
-  *.sink.timeline.slave.host.name = {{hostname}}
-
-  # HTTPS properties
-  *.sink.timeline.truststore.path = {{metric_truststore_path}}
-  *.sink.timeline.truststore.type = {{metric_truststore_type}}
-  *.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-  hiveserver2.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-
-
-{% endif %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
deleted file mode 100755
index 1d75ccf..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_metric_collector %}
-
-  *.period={{metrics_collection_period}}
-  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-  *.sink.timeline.period={{metrics_collection_period}}
-  *.sink.timeline.sendInterval={{metrics_report_interval}}000
-
-  # HTTPS properties
-  *.sink.timeline.truststore.path = {{metric_truststore_path}}
-  *.sink.timeline.truststore.type = {{metric_truststore_type}}
-  *.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-  llapdaemon.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
deleted file mode 100755
index 5ab787c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ /dev/null
@@ -1,52 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_metric_collector %}
-
-  *.period={{metrics_collection_period}}
-  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-  *.sink.timeline.period={{metrics_collection_period}}
-  *.sink.timeline.sendInterval={{metrics_report_interval}}000
-
-  # HTTPS properties
-  *.sink.timeline.truststore.path = {{metric_truststore_path}}
-  *.sink.timeline.truststore.type = {{metric_truststore_type}}
-  *.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-  llaptaskscheduler.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
deleted file mode 100755
index 5af53d0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hive_user}}   - nofile {{hive_user_nofile_limit}}
-{{hive_user}}   - nproc  {{hive_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
deleted file mode 100755
index 70b418c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
-HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
-echo $!|cat>$3
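
The template takes five positional parameters: stdout log ($1), stderr log ($2), pid file ($3), HIVE_CONF_DIR ($4) and hive.log.dir ($5). An illustrative invocation of the rendered script, with placeholder file names:

    bash /var/lib/ambari-agent/tmp/start_hiveserver2_script \
        /var/log/hive/hive-server2.out /var/log/hive/hive-server2.err \
        /var/run/hive/hive-server.pid  /etc/hive/conf.server  /var/log/hive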

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
deleted file mode 100755
index 6062a7e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-HIVE_SERVER2_INTERACTIVE_OPTS=" -hiveconf hive.log.file=hiveserver2Interactive.log -hiveconf hive.log.dir=$5"
-HIVE_INTERACTIVE_CONF_DIR=$4 {{hive_interactive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_INTERACTIVE_OPTS} > $1 2> $2 &
-echo $!|cat>$3
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
deleted file mode 100755
index 3153e81..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-A = load '{{templeton_test_input}}' using PigStorage(':');
-B = foreach A generate \$0 as id; 
-store B into '{{templeton_test_output}}';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
deleted file mode 100755
index 25cfcc6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>KERBEROS</name>
-      <extends>common-services/KERBEROS/1.10.3-10</extends>
-    </service>
-  </services>
-</metainfo>


[08/52] bigtop git commit: ODPI-5. Integrate Ambari packaging into Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ambari.defaults
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ambari.defaults b/bigtop-packages/src/common/ambari/ambari.defaults
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/do-component-build
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/do-component-build b/bigtop-packages/src/common/ambari/do-component-build
new file mode 100644
index 0000000..2760f61
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/do-component-build
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+. `dirname $0`/bigtop.bom
+
+export _JAVA_OPTIONS="-Xmx2048m -XX:MaxPermSize=512m -Djava.awt.headless=true"
+mvn clean package -DskipTests -Drat.skip

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/install_ambari.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/install_ambari.sh b/bigtop-packages/src/common/ambari/install_ambari.sh
new file mode 100755
index 0000000..1a66af5
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/install_ambari.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+usage() {
+  echo "
+usage: $0 <options>
+  Required not-so-options:
+     --build-dir=DIR             path to ambari dist.dir
+     --prefix=PREFIX             path to install into
+     --source-dir=DIR            path to the source code
+  "
+  exit 1
+}
+
+OPTS=$(getopt \
+  -n $0 \
+  -o '' \
+  -l 'prefix:' \
+  -l 'source-dir:' \
+  -l 'distro-dir:' \
+  -l 'build-dir:' -- "$@")
+
+if [ $? != 0 ] ; then
+    usage
+fi
+
+eval set -- "$OPTS"
+while true ; do
+    case "$1" in
+        --prefix)
+        PREFIX=$2 ; shift 2
+        ;;
+        --build-dir)
+        BUILD_DIR=$2 ; shift 2
+        ;;
+        --source-dir)
+        SOURCE_DIR=$2 ; shift 2
+        ;;
+        --distro-dir)
+        DISTRO_DIR=$2 ; shift 2
+        ;;
+        --)
+        shift ; break
+        ;;
+        *)
+        echo "Unknown option: $1"
+        usage
+        exit 1
+        ;;
+    esac
+done
+
+for var in PREFIX BUILD_DIR SOURCE_DIR ; do
+  if [ -z "$(eval "echo \$$var")" ]; then
+    echo Missing param: $var
+    usage
+  fi
+done
+
+install -d -m 0755 ${PREFIX}
+
+#Ambari Server
+LIB_DIR=${LIB_DIR:-/usr/lib/ambari-server}
+LIB_DIR_CLIENT=${LIB_DIR_CLIENT:-/usr/lib/ambari-client}
+ETC_DIR=${ETC_DIR:-/etc/ambari-server}
+CONF_DIR=${CONF_DIR:-${ETC_DIR}/conf}
+
+VAR_LIB_DIR=/var/lib/ambari-server
+SBIN_DIR=/usr/sbin
+SERVER_DIR=$BUILD_DIR/ambari-server/target/ambari-server-*-dist
+
+cp -ra $SERVER_DIR/* ${PREFIX}/
+cp -a  $SOURCE_DIR/ambari-common/src/main/unix/ambari-python-wrap ${PREFIX}/${VAR_LIB_DIR}
+rm -rf ${PREFIX}/var/lib/ambari-server/resources/stacks/HDP*
+cp -r  ${DISTRO_DIR}/ODPi ${PREFIX}/var/lib/ambari-server/resources/stacks/
+
+# End of Ambari Server
+
+LIB_DIR=/usr/lib/ambari-agent
+ETC_DIR=/etc/ambari-agent
+VAR_LIB_DIR=/var/lib/ambari-agent
+
+CONF_DIR=${ETC_DIR}/conf
+AGENT_BUILD_DIR=${BUILD_DIR}/ambari-agent/target/ambari-agent-*
+AGENT_DEST_DIR=/usr/lib/python2.6/site-packages/ambari_agent
+
+cp -ra $AGENT_BUILD_DIR/* ${PREFIX}/
+cp -a $SOURCE_DIR/ambari-common/src/main/unix/ambari-python-wrap ${PREFIX}/${VAR_LIB_DIR}
+rm -rf ${PREFIX}/var/lib/ambari-agent/cache/stacks/HDP*
+cp -r  ${DISTRO_DIR}/ODPi ${PREFIX}/var/lib/ambari-agent/cache/stacks/
+
+#Ambari Groovy Client 
+
+
+PREFIX_AMBARI_CLIENT=${PREFIX}/${LIB_DIR_CLIENT}
+CLIENT_BUILD_DIR=${BUILD_DIR}/ambari-client/groovy-client
+SHELL_BUILD_DIR=${BUILD_DIR}/ambari-shell/ambari-groovy-shell/
+
+install -d -m 0755 ${PREFIX_AMBARI_CLIENT}/lib
+cp -a ${CLIENT_BUILD_DIR}/target/groovy-client*.jar ${PREFIX_AMBARI_CLIENT}/lib
+cp -a ${SHELL_BUILD_DIR}/target/ambari-groovy-shell-*.jar ${PREFIX_AMBARI_CLIENT}/lib
+
+install -d -m 0755 ${PREFIX}/usr/bin
+cat > ${PREFIX}/usr/bin/ambari-shell <<'__EOT__'
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Autodetect JAVA_HOME if not defined
+source /usr/lib/bigtop-utils/bigtop-detect-javahome 2>/dev/null || :
+
+[ -n "$JAVA_HOME" ] && JAVA=${JAVA:-$JAVA_HOME/bin/java}
+JAVA=${JAVA:-java}
+
+$JAVA -jar /usr/lib/ambari-client/lib/ambari-groovy-shell-*.jar --ambari.server=${AMBARI_SERVER:-localhost} \
+                                                                --ambari.port=${AMBARI_PORT:-8080}          \
+                                                                --ambari.user=${AMBARI_USER:-admin}         \
+                                                                --ambari.password=${AMBARI_PASSWD:-admin}
+__EOT__
+chmod 755 ${PREFIX}/usr/bin/ambari-shell
+
+#Ambari Python Client
+
+LIB_DIR=/usr/lib
+CLIENT_BUILD_DIR=${SOURCE_DIR}/ambari-client/python-client
+
+#install -d -m 0755 ${PREFIX_GROOVY_CLIENT}/${LIB_DIR}
+#cp -a ${CLIENT_BUILD_DIR}/src/main/python-client ${PREFIX_GROOVY_CLIENT}/${LIB_DIR}
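
An illustrative invocation of install_ambari.sh, matching the options parsed by getopt above (the directory layout is a placeholder, not taken from the Bigtop build):

    bash install_ambari.sh \
        --build-dir=/path/to/ambari/build/dist \
        --source-dir=/path/to/ambari/source \
        --distro-dir=bigtop-packages/src/common/ambari \
        --prefix=/tmp/ambari-install-root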

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.install
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.install b/bigtop-packages/src/deb/ambari/ambari-agent.install
new file mode 100644
index 0000000..2cafa38
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.install
@@ -0,0 +1,8 @@
+/etc/ambari-agent
+/etc/init.d/ambari-agent
+/usr/sbin/ambari-agent
+/usr/lib/ambari-agent
+/usr/lib/python2.6/site-packages/ambari_agent
+/var/run/
+/var/lib/ambari-agent
+/var/log/

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.postinst
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.postinst b/bigtop-packages/src/deb/ambari/ambari-agent.postinst
new file mode 100755
index 0000000..ceca9be
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.postinst
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+if [ "$1" == "configure" ]; then  # Action is install
+  if [ -f "/var/lib/ambari-agent/install-helper.sh" ]; then
+    /var/lib/ambari-agent/install-helper.sh install
+  fi
+  update-rc.d ambari-agent defaults
+fi
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+if [ -f $BAK ];then
+  if [ -f "/var/lib/ambari-agent/upgrade_agent_configs.py" ]; then
+    /var/lib/ambari-agent/upgrade_agent_configs.py
+  fi
+  mv $BAK ${BAK}_$(date '+%d_%m_%y_%H_%M').save
+fi
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.postrm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.postrm b/bigtop-packages/src/deb/ambari/ambari-agent.postrm
new file mode 100755
index 0000000..16d5b7c
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.postrm
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+if [ "$1" == "upgrade" ]; # Action is upgrade
+then
+  if [ -d "/etc/ambari-agent/conf.save" ]
+  then
+      cp -f /etc/ambari-agent/conf.save/* /etc/ambari-agent/conf
+      mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+  fi
+fi

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.posttrm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.posttrm b/bigtop-packages/src/deb/ambari/ambari-agent.posttrm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.posttrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.preinst
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.preinst b/bigtop-packages/src/deb/ambari/ambari-agent.preinst
new file mode 100755
index 0000000..7265c55
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.preinst
@@ -0,0 +1,55 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+STACKS_FOLDER="/var/lib/ambari-agent/cache/stacks"
+STACKS_FOLDER_OLD=/var/lib/ambari-agent/cache/stacks_$(date '+%d_%m_%y_%H_%M').old
+
+COMMON_SERVICES_FOLDER="/var/lib/ambari-agent/cache/common-services"
+COMMON_SERVICES_FOLDER_OLD=/var/lib/ambari-agent/cache/common-services_$(date '+%d_%m_%y_%H_%M').old
+
+AMBARI_ENV="/var/lib/ambari-agent/ambari-env.sh"
+AMBARI_ENV_OLD="$AMBARI_ENV.rpmsave"
+
+if [ -d "/etc/ambari-agent/conf.save" ]
+then
+    mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+fi
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+BAK_SUDOERS=/etc/sudoers.d/ambari-agent.bak
+ORIG_SUDOERS=/etc/sudoers.d/ambari-agent
+
+[ -f $ORIG ] && mv -f $ORIG $BAK
+[ -f $ORIG_SUDOERS ] && echo "Moving $ORIG_SUDOERS to $BAK_SUDOERS. Please restore the file if you were using it for ambari-agent non-root functionality" && mv -f $ORIG_SUDOERS $BAK_SUDOERS
+
+if [ -d "$STACKS_FOLDER" ]
+then
+    mv -f "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
+if [ -d "$COMMON_SERVICES_FOLDER_OLD" ]
+then
+    mv -f "$COMMON_SERVICES_FOLDER" "$COMMON_SERVICES_FOLDER_OLD"
+fi
+
+if [ -f "$AMBARI_ENV" ]
+then
+    mv -f "$AMBARI_ENV" "$AMBARI_ENV_OLD"
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-agent.prerm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-agent.prerm b/bigtop-packages/src/deb/ambari/ambari-agent.prerm
new file mode 100755
index 0000000..70a9c63
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-agent.prerm
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# WARNING: This script runs not only on uninstall, but also
+# during package update. See http://www.ibm.com/developerworks/library/l-rpm2/
+# for details
+
+if [ "$1" == "remove" ]; then # Action is uninstall
+    /usr/sbin/ambari-agent stop > /dev/null 2>&1
+    if [ -d "/etc/ambari-agent/conf.save" ];  then
+        mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+    fi
+    mv /etc/ambari-agent/conf /etc/ambari-agent/conf.save
+
+    if [ -f "/var/lib/ambari-agent/install-helper.sh" ]; then
+      /var/lib/ambari-agent/install-helper.sh remove
+    fi
+
+    update-rc.d -f ambari-agent remove
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-client.install
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-client.install b/bigtop-packages/src/deb/ambari/ambari-client.install
new file mode 100644
index 0000000..0b76f70
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-client.install
@@ -0,0 +1,2 @@
+/usr/lib/ambari-client
+/usr/bin/ambari-shell

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.install
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.install b/bigtop-packages/src/deb/ambari/ambari-server.install
new file mode 100644
index 0000000..da52c38
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.install
@@ -0,0 +1,10 @@
+/var/lib/ambari-server/
+/etc/init.d/ambari-server
+/etc/ambari-server
+/usr/sbin/ambari-server.py
+/usr/sbin/ambari_server_main.py
+/usr/lib/python2.6/site-packages/ambari_server
+/usr/lib/ambari-server
+/var/run/ambari-server
+/var/lib/ambari-server
+/var/log/ambari-server

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.postinst
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.postinst b/bigtop-packages/src/deb/ambari/ambari-server.postinst
new file mode 100644
index 0000000..9546009
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.postinst
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# Warning: don't add changes to this script directly; add them to install-helper.sh instead.
+
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
+
+if [ "$1" == "configure" ] ; then
+  if [ -f "$INSTALL_HELPER" ]; then
+      "$INSTALL_HELPER" install
+  fi
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.postrm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.postrm b/bigtop-packages/src/deb/ambari/ambari-server.postrm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.postrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.posttrm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.posttrm b/bigtop-packages/src/deb/ambari/ambari-server.posttrm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.posttrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.preinst
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.preinst b/bigtop-packages/src/deb/ambari/ambari-server.preinst
new file mode 100644
index 0000000..34af1c8
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.preinst
@@ -0,0 +1,94 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+ROOT="${RPM_INSTALL_PREFIX}"
+
+STACKS_FOLDER="${ROOT}/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD=${ROOT}/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+
+COMMON_SERVICES_FOLDER="${ROOT}/var/lib/ambari-server/resources/common-services"
+COMMON_SERVICES_FOLDER_OLD=${ROOT}/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old
+
+AMBARI_PROPERTIES="${ROOT}/etc/ambari-server/conf/ambari.properties"
+AMBARI_PROPERTIES_OLD="$AMBARI_PROPERTIES.rpmsave"
+
+AMBARI_ENV="${ROOT}/var/lib/ambari-server/ambari-env.sh"
+AMBARI_ENV_OLD="$AMBARI_ENV.rpmsave"
+
+AMBARI_KRB_JAAS_LOGIN_FILE="${ROOT}/etc/ambari-server/conf/krb5JAASLogin.conf"
+AMBARI_KRB_JAAS_LOGIN_FILE_OLD="$AMBARI_KRB_JAAS_LOGIN_FILE.rpmsave"
+
+AMBARI_VIEWS_FOLDER="${ROOT}/var/lib/ambari-server/resources/views"
+AMBARI_VIEWS_BACKUP_FOLDER="$AMBARI_VIEWS_FOLDER/backups"
+
+AMBARI_SERVER_JAR_FILES="/usr/lib/ambari-server/ambari-server-*.jar"
+AMBARI_SERVER_JAR_FILES_BACKUP_FOLDER="/usr/lib/ambari-server-backups"
+SERVER_CONF_SAVE="${ROOT}/etc/ambari-server/conf.save"
+SERVER_CONF_SAVE_BACKUP="${ROOT}/etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save"
+
+if [ -d "$SERVER_CONF_SAVE" ]
+then
+    mv "$SERVER_CONF_SAVE" "$SERVER_CONF_SAVE_BACKUP"
+fi
+
+if [ -f "$AMBARI_PROPERTIES" ]
+then
+    mv -f "$AMBARI_PROPERTIES" "$AMBARI_PROPERTIES_OLD"
+fi
+
+if [ -f "$AMBARI_ENV" ]
+then
+    mv -f "$AMBARI_ENV" "$AMBARI_ENV_OLD"
+fi
+
+if [ -f "$AMBARI_KRB_JAAS_LOGIN_FILE" ]
+then
+    mv -f "$AMBARI_KRB_JAAS_LOGIN_FILE" "$AMBARI_KRB_JAAS_LOGIN_FILE_OLD"
+fi
+
+if [ -d "$STACKS_FOLDER" ]
+then
+    mv -f "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
+if [ -d "$COMMON_SERVICES_FOLDER" ]
+then
+    mv -f "$COMMON_SERVICES_FOLDER" "$COMMON_SERVICES_FOLDER_OLD"
+fi
+
+if [ ! -d "$AMBARI_VIEWS_BACKUP_FOLDER" ] && [ -d "$AMBARI_VIEWS_FOLDER" ]
+then
+    mkdir "$AMBARI_VIEWS_BACKUP_FOLDER"
+fi
+
+if [ -d "$AMBARI_VIEWS_FOLDER" ] && [ -d "$AMBARI_VIEWS_BACKUP_FOLDER" ]
+then
+    cp -u $AMBARI_VIEWS_FOLDER/*.jar $AMBARI_VIEWS_BACKUP_FOLDER/
+fi
+
+for f in $AMBARI_SERVER_JAR_FILES;
+do
+    if [ -f "$f" ]
+    then
+        if [ ! -d "$AMBARI_SERVER_JAR_FILES_BACKUP_FOLDER" ]
+        then
+            mkdir -p "$AMBARI_SERVER_JAR_FILES_BACKUP_FOLDER"
+        fi
+        mv -f $f $AMBARI_SERVER_JAR_FILES_BACKUP_FOLDER/
+    fi
+done
+
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/ambari-server.prerm
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/ambari-server.prerm b/bigtop-packages/src/deb/ambari/ambari-server.prerm
new file mode 100644
index 0000000..b85bd3b
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/ambari-server.prerm
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# Warning: don't add changes to this script directly; add them to install-helper.sh instead.
+
+INSTALL_HELPER="${RPM_INSTALL_PREFIX}/var/lib/ambari-server/install-helper.sh"
+
+if [ "$1" == "remove" ] ; then # Action is uninstall
+    if [ -f "$INSTALL_HELPER" ]; then
+      $INSTALL_HELPER remove
+    fi
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/changelog
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/changelog b/bigtop-packages/src/deb/ambari/changelog
new file mode 100644
index 0000000..547ed02
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/changelog
@@ -0,0 +1 @@
+--- This is auto-generated 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/compat
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/compat b/bigtop-packages/src/deb/ambari/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/compat
@@ -0,0 +1 @@
+9

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/control
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/control b/bigtop-packages/src/deb/ambari/control
new file mode 100644
index 0000000..2ca1fe5
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/control
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Source: ambari
+Section: misc
+Priority: extra
+Maintainer: Bigtop <de...@bigtop.apache.org>
+Build-Depends: debhelper (>= 6)
+Standards-Version: 3.8.0
+Homepage: http://ambari.apache.org/
+
+Package: ambari-server
+Architecture: all
+Depends: openssl, postgresql (>= 8.1), python (>= 2.6), curl
+Description: Ambari Server 
+
+Package: ambari-agent
+Architecture: all
+Depends: openssl, python (>= 2.6), curl
+Description: Ambari Agent 
+
+Package: ambari-client
+Architecture: all
+Depends: bigtop-utils (>= 0.7)
+Description: Ambari Client

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/copyright
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/copyright b/bigtop-packages/src/deb/ambari/copyright
new file mode 100644
index 0000000..f0199ed
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/copyright
@@ -0,0 +1,15 @@
+Format: http://dep.debian.net/deps/dep5
+Source: http://ambari.apache.org/
+Upstream-Name: Ambari Project
+
+Files: *
+Copyright: 2010-2011, The Ambari Project
+License: Apache-2.0
+
+Files: debian/*
+Copyright: 2011, The Apache Software Foundation
+License: Apache-2.0
+
+License: Apache-2.0
+ On Debian systems, the complete text of the Apache 2.0 license
+ can be found in "/usr/share/common-licenses/Apache-2.0".

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/rules
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/rules b/bigtop-packages/src/deb/ambari/rules
new file mode 100644
index 0000000..3943347
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/rules
@@ -0,0 +1,42 @@
+#!/usr/bin/make -f
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# -*- makefile -*-
+
+# Uncomment this to turn on verbose mode.
+export DH_VERBOSE=1
+
+# This has to be exported to make some magic below work.
+export DH_OPTIONS
+
+%:
+	dh $@
+
+override_dh_auto_build:
+	bash debian/do-component-build
+
+svcs=ambari-agent
+
+$(svcs): debian/init.d.tmpl
+	# bash $< debian/$@.svc deb debian/$@.init
+	touch $@
+
+override_dh_auto_install: $(svcs)
+	bash -x debian/install_ambari.sh \
+	--build-dir=`pwd`   \
+	--distro-dir=debian \
+	--source-dir=`pwd`  \
+	--prefix=debian/tmp

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/source/format
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/source/format b/bigtop-packages/src/deb/ambari/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/source/format
@@ -0,0 +1 @@
+3.0 (quilt)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/deb/ambari/source/include-binaries
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/deb/ambari/source/include-binaries b/bigtop-packages/src/deb/ambari/source/include-binaries
new file mode 100644
index 0000000..d4ec7e3
--- /dev/null
+++ b/bigtop-packages/src/deb/ambari/source/include-binaries
@@ -0,0 +1 @@
+debian/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/rpm/ambari/RPMS/.gitignore
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/rpm/ambari/RPMS/.gitignore b/bigtop-packages/src/rpm/ambari/RPMS/.gitignore
new file mode 100644
index 0000000..1ab8c79
--- /dev/null
+++ b/bigtop-packages/src/rpm/ambari/RPMS/.gitignore
@@ -0,0 +1 @@
+repodata

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/rpm/ambari/SPECS/.gitignore
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/rpm/ambari/SPECS/.gitignore b/bigtop-packages/src/rpm/ambari/SPECS/.gitignore
new file mode 100644
index 0000000..caa5471
--- /dev/null
+++ b/bigtop-packages/src/rpm/ambari/SPECS/.gitignore
@@ -0,0 +1,3 @@
+hadoop.spec
+pig.spec
+hive.spec

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/rpm/ambari/SPECS/ambari.spec
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/rpm/ambari/SPECS/ambari.spec b/bigtop-packages/src/rpm/ambari/SPECS/ambari.spec
new file mode 100644
index 0000000..6d6be69
--- /dev/null
+++ b/bigtop-packages/src/rpm/ambari/SPECS/ambari.spec
@@ -0,0 +1,504 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+%define ambari_name ambari 
+%define _binaries_in_noarch_packages_terminate_build   0
+%define _unpackaged_files_terminate_build 0
+
+%if  %{?suse_version:1}0
+%define doc_ambari %{_docdir}/ambari-doc
+%global initd_dir %{_sysconfdir}/rc.d
+%else
+%define doc_ambari %{_docdir}/ambari-doc-%{ambari_version}
+%global initd_dir %{_sysconfdir}/rc.d/init.d
+%endif
+
+# disable repacking jars
+%define __os_install_post %{nil}
+
+Name: ambari
+Version: %{ambari_version}
+Release: %{ambari_release}
+Summary: Ambari
+URL: http://ambari.apache.org
+Group: Development
+BuildArch: noarch
+Buildroot: %(mktemp -ud %{_tmppath}/apache-%{ambari_name}-%{version}-%{release}-XXXXXX)
+License: ASL 2.0 
+Source0: apache-%{ambari_name}-%{ambari_version}-src.tar.gz
+Source1: do-component-build 
+Source2: install_%{ambari_name}.sh
+Source3: bigtop.bom
+# FIXME
+AutoProv: no
+AutoReqProv: no
+
+%description
+Ambari
+
+%prep
+%setup -n apache-%{ambari_name}-%{ambari_base_version}-src
+
+%build
+bash $RPM_SOURCE_DIR/do-component-build
+
+%install
+%__rm -rf $RPM_BUILD_ROOT
+AMBARI_VERSION=%{ambari_version} bash $RPM_SOURCE_DIR/install_ambari.sh \
+          --build-dir=`pwd` \
+          --distro-dir=$RPM_SOURCE_DIR \
+          --source-dir=`pwd` \
+          --prefix=$RPM_BUILD_ROOT
+%__install -d -m 0755 $RPM_BUILD_ROOT/%{initd_dir}
+%__mv ${RPM_BUILD_ROOT}/etc/init.d/ambari-server ${RPM_BUILD_ROOT}/%{initd_dir} || :
+ 
+%package server
+Summary: Ambari Server
+Group: Development/Libraries
+Requires: openssl, postgresql-server >= 8.1, python >= 2.6, curl
+AutoProv: no
+AutoReqProv: no
+%description server
+Ambari Server
+
+%pre server
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+STACKS_FOLDER="/var/lib/ambari-server/resources/stacks"
+STACKS_FOLDER_OLD=/var/lib/ambari-server/resources/stacks_$(date '+%d_%m_%y_%H_%M').old
+
+COMMON_SERVICES_FOLDER="/var/lib/ambari-server/resources/common-services"
+COMMON_SERVICES_FOLDER_OLD=/var/lib/ambari-server/resources/common-services_$(date '+%d_%m_%y_%H_%M').old
+
+AMBARI_VIEWS_FOLDER="/var/lib/ambari-server/resources/views"
+AMBARI_VIEWS_BACKUP_FOLDER="$AMBARI_VIEWS_FOLDER/backups"
+
+if [ -d "/etc/ambari-server/conf.save" ]
+then
+    mv /etc/ambari-server/conf.save /etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save
+fi
+
+if [ -d "$STACKS_FOLDER" ]
+then
+    mv -f "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
+if [ -d "$COMMON_SERVICES_FOLDER_OLD" ]
+then
+    mv -f "$COMMON_SERVICES_FOLDER" "$COMMON_SERVICES_FOLDER_OLD"
+fi
+
+if [ ! -d "$AMBARI_VIEWS_BACKUP_FOLDER" ] && [ -d "$AMBARI_VIEWS_FOLDER" ]
+then
+    mkdir "$AMBARI_VIEWS_BACKUP_FOLDER"
+fi
+
+if [ -d "$AMBARI_VIEWS_FOLDER" ] && [ -d "$AMBARI_VIEWS_BACKUP_FOLDER" ]
+then
+    cp -u $AMBARI_VIEWS_FOLDER/*.jar $AMBARI_VIEWS_BACKUP_FOLDER/
+fi
+
+exit 0
+
+%post server
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+if [ -e "/usr/sbin/ambari-server" ]; then # Check is needed for upgrade
+    # Remove link created by previous package version
+    rm -f /usr/sbin/ambari-server
+fi
+
+ln -s /etc/init.d/ambari-server /usr/sbin/ambari-server
+
+case "$1" in
+  1) # Action install
+    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
+        /var/lib/ambari-server/install-helper.sh install
+    fi
+    chkconfig --add ambari-server
+  ;;
+  2) # Action upgrade
+    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
+        /var/lib/ambari-server/install-helper.sh upgrade
+    fi
+  ;;
+esac
+
+exit 0
+
+%preun server
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# WARNING: This script runs not only on uninstall, but also
+# during package update. See http://www.ibm.com/developerworks/library/l-rpm2/
+# for details
+
+if [ "$1" -eq 0 ]; then  # Action is uninstall
+    /usr/sbin/ambari-server stop > /dev/null 2>&1
+    if [ -d "/etc/ambari-server/conf.save" ]; then
+        mv /etc/ambari-server/conf.save /etc/ambari-server/conf_$(date '+%d_%m_%y_%H_%M').save
+    fi
+
+    if [ -e "/usr/sbin/ambari-server" ]; then
+        # Remove link created during install
+        rm /usr/sbin/ambari-server
+    fi
+
+    mv /etc/ambari-server/conf /etc/ambari-server/conf.save
+
+    if [ -f "/var/lib/ambari-server/install-helper.sh" ]; then
+      /var/lib/ambari-server/install-helper.sh remove
+    fi
+
+    chkconfig --list | grep ambari-server && chkconfig --del ambari-server
+fi
+
+exit 0
+
+%posttrans server
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+
+RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
+RESOURCE_MANAGEMENT_DIR_SERVER="/usr/lib/ambari-server/lib/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
+JINJA_SERVER_DIR="/usr/lib/ambari-server/lib/ambari_jinja2"
+AMBARI_SERVER_EXECUTABLE_LINK="/usr/sbin/ambari-server"
+AMBARI_SERVER_EXECUTABLE="/etc/init.d/ambari-server"
+
+
+# needed for upgrade through ambari-2.2.2
+rm -f "$AMBARI_SERVER_EXECUTABLE_LINK"
+ln -s "$AMBARI_SERVER_EXECUTABLE" "$AMBARI_SERVER_EXECUTABLE_LINK"
+
+# remove RESOURCE_MANAGEMENT_DIR if it's a directory
+if [ -d "$RESOURCE_MANAGEMENT_DIR" ]; then  # resource_management dir exists
+  if [ ! -L "$RESOURCE_MANAGEMENT_DIR" ]; then # resource_management dir is not link
+    rm -rf "$RESOURCE_MANAGEMENT_DIR"
+  fi
+fi
+# setting resource_management shared resource
+if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
+  ln -s "$RESOURCE_MANAGEMENT_DIR_SERVER" "$RESOURCE_MANAGEMENT_DIR"
+fi
+
+# setting jinja2 shared resource
+if [ ! -d "$JINJA_DIR" ]; then
+  ln -s "$JINJA_SERVER_DIR" "$JINJA_DIR"
+fi
+
+exit 0
+
+%package agent
+Summary: Ambari Agent
+Group: Development/Libraries
+Requires: openssl, zlib, python >= 2.6, rpm-python
+AutoProv: no
+AutoReqProv: no
+%description agent
+Ambari Agent
+
+%pre agent
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+STACKS_FOLDER="/var/lib/ambari-agent/cache/stacks"
+STACKS_FOLDER_OLD=/var/lib/ambari-agent/cache/stacks_$(date '+%d_%m_%y_%H_%M').old
+
+COMMON_SERVICES_FOLDER="/var/lib/ambari-agent/cache/common-services"
+COMMON_SERVICES_FOLDER_OLD=/var/lib/ambari-agent/cache/common-services_$(date '+%d_%m_%y_%H_%M').old
+
+if [ -d "/etc/ambari-agent/conf.save" ]
+then
+    mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+fi
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+BAK_SUDOERS=/etc/sudoers.d/ambari-agent.bak
+ORIG_SUDOERS=/etc/sudoers.d/ambari-agent
+
+[ -f $ORIG ] && mv -f $ORIG $BAK
+[ -f $ORIG_SUDOERS ] && echo "Moving $ORIG_SUDOERS to $BAK_SUDOERS. Please restore the file if you were using it for ambari-agent non-root functionality" && mv -f $ORIG_SUDOERS $BAK_SUDOERS
+
+if [ -d "$STACKS_FOLDER" ]
+then
+    mv -f "$STACKS_FOLDER" "$STACKS_FOLDER_OLD"
+fi
+
+if [ -d "$COMMON_SERVICES_FOLDER_OLD" ]
+then
+    mv -f "$COMMON_SERVICES_FOLDER" "$COMMON_SERVICES_FOLDER_OLD"
+fi
+
+exit 0
+
+%post agent
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+
+case "$1" in
+  1) # Action install
+    if [ -f "/var/lib/ambari-agent/install-helper.sh" ]; then
+        /var/lib/ambari-agent/install-helper.sh install
+    fi
+  chkconfig --add ambari-agent
+  ;;
+  2) # Action upgrade
+    if [ -d "/etc/ambari-agent/conf.save" ]; then
+        cp -f /etc/ambari-agent/conf.save/* /etc/ambari-agent/conf
+        mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+    fi
+
+    if [ -f "/var/lib/ambari-agent/install-helper.sh" ]; then
+        /var/lib/ambari-agent/install-helper.sh upgrade
+    fi
+  ;;
+esac
+
+
+BAK=/etc/ambari-agent/conf/ambari-agent.ini.old
+ORIG=/etc/ambari-agent/conf/ambari-agent.ini
+
+if [ -f $BAK ]; then
+  if [ -f "/var/lib/ambari-agent/upgrade_agent_configs.py" ]; then
+    /var/lib/ambari-agent/upgrade_agent_configs.py
+  fi
+  mv $BAK ${BAK}_$(date '+%d_%m_%y_%H_%M').save
+fi
+
+
+exit 0
+
+%preun agent
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# WARNING: This script runs not only on uninstall, but also
+# during package update. See http://www.ibm.com/developerworks/library/l-rpm2/
+# for details
+
+
+if [ "$1" -eq 0 ]; then  # Action is uninstall
+    /usr/sbin/ambari-agent stop > /dev/null 2>&1
+    if [ -d "/etc/ambari-agent/conf.save" ]; then
+        mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
+    fi
+    mv /etc/ambari-agent/conf /etc/ambari-agent/conf.save
+
+    if [ -f "/var/lib/ambari-agent/install-helper.sh" ]; then
+      /var/lib/ambari-agent/install-helper.sh remove
+    fi
+
+    chkconfig --list | grep ambari-agent && chkconfig --del ambari-agent
+fi
+
+exit 0
+
+%posttrans agent
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+
+RESOURCE_MANAGEMENT_DIR="/usr/lib/python2.6/site-packages/resource_management"
+RESOURCE_MANAGEMENT_DIR_AGENT="/usr/lib/ambari-agent/lib/resource_management"
+JINJA_DIR="/usr/lib/python2.6/site-packages/ambari_jinja2"
+JINJA_AGENT_DIR="/usr/lib/ambari-agent/lib/ambari_jinja2"
+
+# remove RESOURCE_MANAGEMENT_DIR if it's a directory
+if [ -d "$RESOURCE_MANAGEMENT_DIR" ]; then  # resource_management dir exists
+  if [ ! -L "$RESOURCE_MANAGEMENT_DIR" ]; then # resource_management dir is not link
+    rm -rf "$RESOURCE_MANAGEMENT_DIR"
+  fi
+fi
+# setting resource_management shared resource
+if [ ! -d "$RESOURCE_MANAGEMENT_DIR" ]; then
+  ln -s "$RESOURCE_MANAGEMENT_DIR_AGENT" "$RESOURCE_MANAGEMENT_DIR"
+fi
+
+# setting jinja2 shared resource
+if [ ! -d "$JINJA_DIR" ]; then
+  ln -s "$JINJA_AGENT_DIR" "$JINJA_DIR"
+fi
+
+exit 0
+
+%package client
+Summary: Ambari Client
+Group: Development/Libraries
+Requires: bigtop-utils >= 0.7
+%description client
+Ambari Client
+
+%files server
+%attr(644,root,root) /etc/init/ambari-server.conf
+%defattr(644,root,root,755)
+/usr/lib/ambari-server
+%attr(755,root,root) /usr/sbin/ambari-server.py
+%attr(755,root,root) /usr/sbin/ambari_server_main.py
+%attr(755,root,root) %{initd_dir}/ambari-server
+/var/lib/ambari-server
+%attr(755,root,root) /var/lib/ambari-server/ambari-python-wrap
+%config  /etc/ambari-server/conf
+%config %attr(700,root,root) /var/lib/ambari-server/ambari-env.sh
+%attr(700,root,root) /var/lib/ambari-server/ambari-sudo.sh
+%attr(700,root,root) /var/lib/ambari-server/install-helper.sh
+%attr(700,root,root) /var/lib/ambari-server/keys/db
+%attr(755,root,root) /var/lib/ambari-server/resources/stacks/stack_advisor.py
+%dir %attr(755,root,root) /var/lib/ambari-server/data/tmp
+%dir %attr(700,root,root) /var/lib/ambari-server/data/cache
+%attr(755,root,root) /var/lib/ambari-server/resources/apps
+%attr(755,root,root) /var/lib/ambari-server/resources/scripts
+%attr(755,root,root) /var/lib/ambari-server/resources/views
+%attr(755,root,root) /var/lib/ambari-server/resources/custom_actions
+%attr(755,root,root) /var/lib/ambari-server/resources/host_scripts
+%dir  /var/lib/ambari-server/resources/upgrade
+%dir  /var/run/ambari-server
+%dir  /var/run/ambari-server/bootstrap
+%dir  /var/run/ambari-server/stack-recommendations
+%dir  /var/log/ambari-server
+%attr(755,root,root) /usr/lib/python2.6/site-packages/ambari_server
+
+%files agent
+%attr(644,root,root) /etc/init/ambari-agent.conf
+%attr(-,root,root) /usr/lib/python2.6/site-packages/ambari_agent
+%attr(755,root,root) /var/lib/ambari-agent/ambari-python-wrap
+%attr(755,root,root) /var/lib/ambari-agent/ambari-sudo.sh
+%attr(-,root,root) /usr/lib/ambari-agent/lib/ambari_commons
+%attr(-,root,root) /usr/lib/ambari-agent/lib/resource_management
+%attr(755,root,root) /usr/lib/ambari-agent/lib/ambari_jinja2
+%attr(755,root,root) /usr/lib/ambari-agent/lib/ambari_simplejson
+%attr(755,root,root) /usr/lib/ambari-agent/lib/examples
+%attr(755,root,root) /etc/ambari-agent/conf/ambari-agent.ini
+%attr(755,root,root) /etc/ambari-agent/conf/logging.conf.sample
+%attr(755,root,root) /usr/sbin/ambari-agent
+%config %attr(700,root,root) /var/lib/ambari-agent/ambari-env.sh
+%attr(700,root,root) /var/lib/ambari-agent/install-helper.sh
+%attr(700,root,root) /var/lib/ambari-agent/upgrade_agent_configs.py
+%dir %attr(755,root,root) /var/run/ambari-agent
+%dir %attr(755,root,root) /var/lib/ambari-agent/data
+%dir %attr(777,root,root) /var/lib/ambari-agent/tmp
+%dir %attr(755,root,root) /var/lib/ambari-agent/keys
+%dir %attr(755,root,root) /var/log/ambari-agent
+%attr(755,root,root) /etc/init.d/ambari-agent
+%attr(755,root,root) /var/lib/ambari-agent/data
+%attr(755,root,root) /var/lib/ambari-agent/cache
+
+%files client
+%defattr(644,root,root,755)
+/usr/lib/ambari-client
+%attr(755,root,root) /usr/bin/ambari-shell


[30/52] bigtop git commit: ODPI-197. Enable Kerberos in ODPi Ambari

Posted by rv...@apache.org.
ODPI-197. Enable Kerberos in ODPi Ambari

(cherry picked from commit 1f067b757dffaad34f804a1721e51dd6daeb74df)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/4ded4432
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/4ded4432
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/4ded4432

Branch: refs/heads/master
Commit: 4ded44321ec0f54ec618e0c209af4be61264c2d9
Parents: d7c2a9a
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Fri Oct 28 10:55:30 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:10 2017 -0700

----------------------------------------------------------------------
 .../src/common/ambari/ODPi/1.0/kerberos.json    | 60 +++++++++++++
 .../ambari/ODPi/1.0/services/HIVE/kerberos.json | 17 ----
 .../ODPi/1.0/services/KERBEROS/metainfo.xml     | 26 ++++++
 .../src/common/ambari/ODPi/1.0/widgets.json     | 95 ++++++++++++++++++++
 4 files changed, 181 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/4ded4432/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
new file mode 100644
index 0000000..3aad080
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
@@ -0,0 +1,60 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username" : "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    },
+    {
+      "name": "ambari-server",
+      "principal": {
+        "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/ambari_principal_name"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/ambari.server.keytab"
+      }
+    }
+  ]
+
+}
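
As a rough illustration of how these descriptor variables resolve (all values
hypothetical): with kerberos-env/realm = EXAMPLE.COM, a cluster named "ODPi1",
and cluster-env/smokeuser = ambari-qa, the smokeuser identity above would yield
the principal ambari-qa-odpi1@EXAMPLE.COM with keytab
/etc/security/keytabs/smokeuser.headless.keytab, while the spnego identity
yields HTTP/<host-fqdn>@EXAMPLE.COM with
/etc/security/keytabs/spnego.service.keytab.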

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4ded4432/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
index 34bda73..4b31f7a 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
@@ -113,23 +113,6 @@
           ]
         },
         {
-          "name": "HIVE_SERVER_INTERACTIVE",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/spnego"
-            },
-            {
-              "name": "/YARN/NODEMANAGER/llap_zk_hive"
-            }
-          ]
-        },
-        {
           "name": "WEBHCAT_SERVER",
           "identities": [
             {

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4ded4432/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
new file mode 100755
index 0000000..25cfcc6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/KERBEROS/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+      <extends>common-services/KERBEROS/1.10.3-10</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/4ded4432/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json b/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
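
The heatmap values above are simple expressions over paired metrics; for
example, the Host Disk Space Used % formula ((disk_total-disk_free)/disk_total)*100
evaluates to 75 for a host with disk_total = 400 GB and disk_free = 100 GB,
which the display_unit property renders as 75% (figures are illustrative).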


[35/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
deleted file mode 100755
index 5fc498d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,168 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    component_type = 'hs'
-    if params.hadoop_ssl_enabled:
-      component_address = params.hs_webui_address
-    else:
-      component_address = params.hs_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
-      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
-    #
-    # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
-    # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
-    # input_file = format("/user/hadoop/mapredsmokeinput")
-    # output_file = format("/user/hadoop/mapredsmokeoutput")
-    # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
-    # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
-    # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
-    # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
-    #
-    # if params.security_enabled:
-    #   kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
-    #   Execute(kinit_cmd)
-    #
-    # Execute(cleanup_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(create_file_cmd,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(run_wordcount_job,
-    #         tries=1,
-    #         try_sleep=5,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-    #
-    # Execute(test_cmd,
-    #         logoutput=True,
-    #         user=params.hdfs_user
-    # )
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.smokeuser,
-                      mode=params.smoke_hdfs_user_mode,
-    )
-    params.HdfsResource(output_file,
-                        action = "delete_on_execute",
-                        type = "directory",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(input_file,
-                        action = "create_on_execute",
-                        type = "file",
-                        source = "/etc/passwd",
-                        dfs_type = params.dfs_type,
-    )
-    params.HdfsResource(None, action="execute")
-
-    # initialize the ticket
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True)
-
-    # the ticket may have expired, so re-initialize
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      Execute(kinit_cmd, user=params.smokeuser)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  bin_dir=params.execute_path,
-                  conf_dir=params.hadoop_conf_dir)
-
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()
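
For readers skimming the service check above, the following is a minimal, self-contained sketch of the same wordcount smoke-test flow using plain subprocess calls rather than Ambari's HdfsResource/ExecuteHadoop resources. The hadoop binary on PATH, the examples jar path, and the HDFS paths are illustrative assumptions, not values taken from the stack definition.

    import subprocess

    def mapreduce_smoke_test(jar="/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
                             input_file="/user/ambari-qa/mapredsmokeinput",
                             output_dir="/user/ambari-qa/mapredsmokeoutput"):
        # Clean up any previous run, stage an input file, run wordcount,
        # then verify the output directory exists (exit status 0 means success).
        subprocess.call(["hadoop", "fs", "-rm", "-r", "-f", output_dir, input_file])
        subprocess.check_call(["hadoop", "fs", "-put", "/etc/passwd", input_file])
        subprocess.check_call(["hadoop", "jar", jar, "wordcount", input_file, output_dir])
        return subprocess.call(["hadoop", "fs", "-test", "-e", output_dir]) == 0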

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
deleted file mode 100755
index 424157b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-# Python imports
-import os
-import sys
-
-# Local imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
-from resource_management.libraries.functions.constants import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core.logger import Logger
-
-
-class MapReduce2Client(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, config_dir=None, upgrade_type=None):
-    """
-    :param env: Python environment
-    :param config_dir: During rolling upgrade, which config directory to save configs to.
-    """
-    import params
-    env.set_params(params)
-    yarn(config_dir=config_dir)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def stack_upgrade_save_new_config(self, env):
-    """
-    Because this gets called during a Rolling Upgrade, the new mapreduce configs have already been saved, so we must be
-    careful to only call configure() on the directory of the new version.
-    :param env:
-    """
-    import params
-    env.set_params(params)
-
-    conf_select_name = "hadoop"
-    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
-
-    if config_dir:
-      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
-
-      # Because this script was called from ru_execute_tasks.py, which already enters an Environment with its own basedir,
-      # we must change it here so this function can find the Jinja templates for the service.
-      env.config.basedir = base_dir
-      conf_select.select(params.stack_name, conf_select_name, params.version)
-      self.configure(env, config_dir=config_dir)
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class MapReduce2ClientWindows(MapReduce2Client):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()
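
The client script above gates conf-select/stack-select on check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version). As a rough, standard-library-only illustration of that kind of version gating (not the actual resource_management implementation), a tuple comparison with a made-up minimum version could look like this:

    def version_tuple(version):
        # "2.3.4.0-1234" -> (2, 3, 4, 0); the build suffix after '-' is ignored.
        return tuple(int(part) for part in version.split("-")[0].split("."))

    def supports_rolling_upgrade(version, minimum="2.2.0.0"):
        return version_tuple(version) >= version_tuple(minimum)

    print(supports_rolling_upgrade("2.3.4.0-1234"))  # True under the assumed minimum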

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
deleted file mode 100755
index b235cad..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import nodemanager_upgrade
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('nodemanager',action='stop')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',action='start')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name="nodemanager")
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class NodemanagerWindows(Nodemanager):
-  def status(self, env):
-    service('nodemanager', action='status')
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
-
-  def post_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing NodeManager Stack Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    nodemanager_upgrade.post_upgrade_check()
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal is not set properly."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-
-if __name__ == "__main__":
-  Nodemanager().execute()
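
The security_status method above relies on Ambari helpers (build_expectations, validate_security_config_properties) to compare yarn-site on disk against expected values. A stripped-down, dictionary-only sketch of that idea, with property names borrowed from the script and example values invented for illustration:

    def validate_security_props(actual, expected_values, non_empty_keys):
        # Return a list of human-readable issues; an empty list means all checks passed.
        issues = []
        for key, expected in expected_values.items():
            if actual.get(key) != expected:
                issues.append("%s should be %r but is %r" % (key, expected, actual.get(key)))
        for key in non_empty_keys:
            if not actual.get(key):
                issues.append("%s must not be empty" % key)
        return issues

    yarn_site = {"yarn.acl.enable": "true",
                 "yarn.timeline-service.http-authentication.type": "kerberos",
                 "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
                 "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab"}
    print(validate_security_props(yarn_site,
                                  {"yarn.acl.enable": "true",
                                   "yarn.timeline-service.http-authentication.type": "kerberos"},
                                  ["yarn.nodemanager.principal", "yarn.nodemanager.keytab"]))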

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
deleted file mode 100755
index 1c886f9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import subprocess
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core import shell
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.show_logs import show_logs
-
-
-def post_upgrade_check():
-  '''
-  Checks that the NodeManager has rejoined the cluster.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  '''
-  import params
-
-  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
-  if params.security_enabled and params.nodemanager_kinit_cmd:
-    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)
-
-  try:
-    _check_nodemanager_startup()
-  except Fail:
-    show_logs(params.yarn_log_dir, params.yarn_user)
-    raise
-    
-
-@retry(times=30, sleep_time=10, err_class=Fail)
-def _check_nodemanager_startup():
-  '''
-  Checks that a NodeManager is in a RUNNING state in the cluster via
-  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
-  alive this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  '''
-  import params
-  import socket
-
-  command = 'yarn node -list -states=RUNNING'
-  return_code, yarn_output = shell.checked_call(command, user=params.yarn_user)
-  
-  hostname = params.hostname.lower()
-  hostname_ip = socket.gethostbyname(params.hostname.lower())
-  nodemanager_address = params.nm_address.lower()
-  yarn_output = yarn_output.lower()
-
-  if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
-    Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address))
-    return
-  else:
-    raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output))
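
nodemanager_upgrade.py above leans on the @retry decorator to poll "yarn node -list -states=RUNNING" until the node reappears. A minimal standard-library approximation of such a decorator (not the resource_management implementation) is sketched below; the flaky() function is a hypothetical stand-in for the real check.

    import functools
    import time

    def retry(times=30, sleep_time=10, err_class=Exception):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return func(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise
                        time.sleep(sleep_time)
            return wrapper
        return decorator

    @retry(times=3, sleep_time=1, err_class=RuntimeError)
    def flaky():
        # Would be retried up to 3 times before the final exception propagates.
        raise RuntimeError("node not yet RUNNING")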

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
deleted file mode 100755
index 073e84f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.default import default
-
-if OSCheck.is_windows_family():
-  from params_windows import *
-else:
-  from params_linux import *
-
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-retryAble = default("/commandParams/command_retry_enabled", False)
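
params.py resolves values with calls such as default("/commandParams/command_retry_enabled", False), which walk a slash-separated path through the command JSON. A rough stand-in for that helper, operating on a plain nested dict and using a different signature than the Ambari original, might be:

    def default(config, path, fallback):
        # Walk an "/a/b/c" style path through nested dicts, returning fallback on any miss.
        node = config
        for part in path.strip("/").split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    command = {"commandParams": {"command_retry_enabled": "true"}}
    print(default(command, "/commandParams/command_retry_enabled", False))  # "true"
    print(default(command, "/hostLevelParams/host_sys_prepped", False))     # False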

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
deleted file mode 100755
index 4d42861..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries import functions
-from resource_management.libraries.functions import is_empty
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-MAPR_SERVER_ROLE_DIRECTORY_MAP = {
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
-}
-
-YARN_SERVER_ROLE_DIRECTORY_MAP = {
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'YARN_CLIENT' : 'hadoop-yarn-client'
-}
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = status_params.stack_name
-stack_root = Script.get_stack_root()
-tarball_map = default("/configurations/cluster-env/tarball_map", None)
-
-config_path = os.path.join(stack_root, "current/hadoop-client/conf")
-config_dir = os.path.realpath(config_path)
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
-
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
-
-# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-hostname = config['hostname']
-
-# hadoop default parameters
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-
-# hadoop parameters for stacks supporting rolling_upgrade
-if stack_supports_ru:
-  # MapR directory root
-  mapred_role_root = "hadoop-mapreduce-client"
-  command_role = default("/role", "")
-  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
-    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  # YARN directory root
-  yarn_role_root = "hadoop-yarn-client"
-  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
-    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
-
-  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
-  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
-
-  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
-  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
-
-if stack_supports_timeline_state_store:
-  # Timeline Service property that was added with the timeline_state_store stack feature
-  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
-
-# ats 1.5 properties
-entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
-entity_groupfs_active_dir_mode = 01777
-entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
-entity_groupfs_store_dir_mode = 0700
-
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-limits_conf_dir = "/etc/security/limits.d"
-yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
-yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
-
-mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
-mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
-
-ulimit_cmd = "ulimit -c unlimited;"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_hdfs_user_mode = 0770
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-nm_security_marker_dir = "/var/lib/hadoop-yarn"
-nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
-current_nm_security_state = os.path.isfile(nm_security_marker)
-toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
-is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
-container_executor_mode = 06050 if is_linux_container_executor else 02050
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
-yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
-rm_hosts = config['clusterHostInfo']['rm_host']
-rm_host = rm_hosts[0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
-# TODO UPGRADE default, update site during upgrade
-rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
-apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
-ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
-ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
-mapred_env_sh_template = config['configurations']['mapred-env']['content']
-yarn_env_sh_template = config['configurations']['yarn-env']['content']
-yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
-service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
-
-if len(rm_hosts) > 1:
-  additional_rm_host = rm_hosts[1]
-  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
-else:
-  rm_webui_address = format("{rm_host}:{rm_port}")
-  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
-if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
-  nm_address = nm_address.replace("0.0.0.0", hostname)
-
-# Initialize lists of work directories.
-nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
-nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
-
-nm_local_dirs_list = nm_local_dirs.split(',')
-nm_log_dirs_list = nm_log_dirs.split(',')
-
-nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
-nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
-
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-entity_file_history_directory = "/tmp/entity-file-history/active"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = not len(ats_host) == 0
-
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
-# don't use len(nm_hosts) here, because the check can take too much time on large clusters
-number_of_nm = 1
-
-# default kinit commands
-rm_kinit_cmd = ""
-yarn_timelineservice_kinit_cmd = ""
-nodemanager_kinit_cmd = ""
-
-if security_enabled:
-  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
-  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
-  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
-  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
-
-  # YARN timeline security options
-  if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
-
-  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
-    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
-    if _nodemanager_principal_name:
-      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
-
-    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
-    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
-
-
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
-jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
-jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-app_dir_files = {tez_local_api_jars:None}
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
-
-# Path to file that contains list of HDFS resources to be skipped during processing
-hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-
-import functools
-# create a partial function with common arguments for every HdfsResource call;
-# to create/delete an HDFS directory/file or copy from local, code calls params.HdfsResource
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-min_user_id = config['configurations']['yarn-env']['min_user_id']
-
-# Node labels
-node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
-node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
-
-cgroups_dir = "/cgroups_test/cpu"
-
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-if dfs_ha_namenode_active is not None: 
-  namenode_hostname = dfs_ha_namenode_active
-else:
-  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
-
-ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
-
-scheme = 'http' if not yarn_https_on else 'https'
-yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
-rm_active_port = rm_https_port if yarn_https_on else rm_port
-
-rm_ha_enabled = False
-rm_ha_ids_list = []
-rm_webapp_addresses_list = [yarn_rm_address]
-rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
-
-if rm_ha_ids:
-  rm_ha_ids_list = rm_ha_ids.split(",")
-  if len(rm_ha_ids_list) > 1:
-    rm_ha_enabled = True
-
-if rm_ha_enabled:
-  rm_webapp_addresses_list = []
-  for rm_id in rm_ha_ids_list:
-    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
-    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
-    rm_webapp_addresses_list.append(rm_webapp_address)
-
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    #For curl command in ranger plugin to get db connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-    xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False
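
params_linux.py above derives the ResourceManager web UI address list from yarn-site, switching to per-rm-id properties whenever yarn.resourcemanager.ha.rm-ids lists more than one id. A self-contained sketch of that derivation over a plain dict, with hostnames invented for the example:

    def rm_webapp_addresses(yarn_site, https_on=False):
        suffix = "https.address" if https_on else "address"
        rm_ids = [i for i in (yarn_site.get("yarn.resourcemanager.ha.rm-ids") or "").split(",") if i]
        if len(rm_ids) > 1:
            return [yarn_site["yarn.resourcemanager.webapp.%s.%s" % (suffix, rm_id)]
                    for rm_id in rm_ids]
        return [yarn_site["yarn.resourcemanager.webapp.%s" % suffix]]

    example = {"yarn.resourcemanager.ha.rm-ids": "rm1,rm2",
               "yarn.resourcemanager.webapp.address.rm1": "rm1.example.com:8088",
               "yarn.resourcemanager.webapp.address.rm2": "rm2.example.com:8088"}
    print(rm_webapp_addresses(example))  # ['rm1.example.com:8088', 'rm2.example.com:8088']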

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
deleted file mode 100755
index 0f8ce73..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries import functions
-import os
-from status_params import *
-
-# server configurations
-config = Script.get_config()
-
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-yarn_user = hadoop_user
-hdfs_user = hadoop_user
-smokeuser = hadoop_user
-config_dir = os.environ["HADOOP_CONF_DIR"]
-hadoop_home = os.environ["HADOOP_HOME"]
-
-yarn_home = os.environ["HADOOP_YARN_HOME"]
-
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-
-hs_host = config['clusterHostInfo']['hs_host'][0]
-hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
-hs_webui_address = format("{hs_host}:{hs_port}")
-
-hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
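
Both params files take the web UI port by splitting host:port values such as yarn.resourcemanager.webapp.address. A tiny illustrative helper, with a placeholder hostname:

    def port_of(address, default_port="8088"):
        # "host.example.com:8088" -> "8088"; fall back when no port is present.
        return address.split(":")[-1] if ":" in address else default_port

    rm_host = "rm.example.com"                       # placeholder host
    rm_port = port_of("0.0.0.0:8088")                # value shape taken from yarn-site
    print("%s:%s" % (rm_host, rm_port))              # -> rm.example.com:8088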

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
deleted file mode 100755
index 6a7eea7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,289 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.libraries.functions.decorator import retry
-from resource_management.core.resources.system import File, Execute
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
-from resource_management import is_empty
-from resource_management import shell
-
-
-from yarn import yarn
-from service import service
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from setup_ranger_yarn import setup_ranger_yarn
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    service('resourcemanager', action='stop')
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn(name='resourcemanager')
-
-  def refreshqueues(self, env):
-    pass
-
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ResourcemanagerWindows(Resourcemanager):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    service('resourcemanager', action='status')
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    yarn_user = params.yarn_user
-
-    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         mode="f"
-    )
-
-    if params.update_exclude_file_only == False:
-      Execute(yarn_refresh_cmd, user=yarn_user)
-
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing ResourceManager Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
-      setup_ranger_yarn() #Ranger Yarn Plugin related calls
-
-    # wait for active-dir and done-dir to be created by ATS if needed
-    if params.has_ats:
-      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
-      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
-
-    service('resourcemanager', action='start')
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-    pass
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal is not set properly."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def refreshqueues(self, env):
-    import params
-
-    self.configure(env)
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='refreshQueues'
-    )
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    rm_kinit_cmd = params.rm_kinit_cmd
-    yarn_user = params.yarn_user
-    conf_dir = params.hadoop_conf_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    if params.update_exclude_file_only == False:
-      Execute(yarn_refresh_cmd,
-            environment= {'PATH' : params.execute_path },
-            user=yarn_user)
-      pass
-    pass
-
-
-
-
-  def wait_for_dfs_directories_created(self, *dirs):
-    import params
-
-    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
-
-    if params.security_enabled:
-      Execute(params.rm_kinit_cmd,
-              user=params.yarn_user
-      )
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-
-    for dir_path in dirs:
-      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
-
-
-  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
-  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
-    import params
-
-
-    if not is_empty(dir_path):
-      dir_path = HdfsResourceProvider.parse_path(dir_path)
-
-      if dir_path in ignored_dfs_dirs:
-        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
-        return
-
-      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
-
-      dir_exists = None
-
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # checking via WebHDFS is much faster than executing hdfs dfs -test
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
-        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        dir_exists = ('FileStatus' in list_status)
-      else:
-        # have to do the time-expensive hdfs dfs -test -d check.
-        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
-        dir_exists = not dfs_ret_code  # dfs -test -d returns 0 when the dir exists
-
-      if not dir_exists:
-        raise Fail("DFS directory '" + dir_path + "' does not exist !")
-      else:
-        Logger.info("DFS directory '" + dir_path + "' exists.")
-
-  def get_log_folder(self):
-    import params
-    return params.yarn_log_dir
-  
-  def get_user(self):
-    import params
-    return params.yarn_user
-  
-if __name__ == "__main__":
-  Resourcemanager().execute()
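
The wait_for_dfs_directory_created logic above prefers a WebHDFS GETFILESTATUS call (treating a 404 as "missing") and only falls back to the slower "hdfs dfs -test -d" shell check. A minimal, standalone sketch of the WebHDFS branch follows; the NameNode HTTP address, the port and the unsecured-cluster assumption are illustrative only and are not taken from this patch, which goes through WebHDFSUtil instead.

    # Sketch of the WebHDFS existence check; host/port and simple auth are assumptions.
    import json
    import urllib2  # Python 2, to match the Ambari scripts

    def dfs_directory_exists(namenode_http_address, dir_path, user="hdfs"):
        url = "http://%s/webhdfs/v1%s?op=GETFILESTATUS&user.name=%s" % (
            namenode_http_address, dir_path, user)
        try:
            response = urllib2.urlopen(url, timeout=10)
        except urllib2.HTTPError as err:
            if err.code == 404:   # path does not exist
                return False
            raise
        status = json.load(response)
        # GETFILESTATUS returns a FileStatus object for an existing path
        return status.get("FileStatus", {}).get("type") == "DIRECTORY"

    # Example (hypothetical address): dfs_directory_exists("nn.example.com:50070", "/ats/done")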

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
deleted file mode 100755
index b1179b9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
+++ /dev/null
@@ -1,105 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.show_logs import show_logs
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def service(componentName, action='start', serviceName='yarn'):
-  import status_params
-  if componentName in status_params.service_map:
-    service_name = status_params.service_map[componentName]
-    if action == 'start' or action == 'stop':
-      Service(service_name, action=action)
-    elif action == 'status':
-      check_windows_service_status(service_name)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def service(componentName, action='start', serviceName='yarn'):
-  import params
-
-  if serviceName == 'mapreduce' and componentName == 'historyserver':
-    delete_pid_file = True
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
-    usr = params.mapred_user
-    log_dir = params.mapred_log_dir
-  else:
-    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
-    # may not work correctly when stopping the service
-    delete_pid_file = False
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
-    usr = params.yarn_user
-    log_dir = params.yarn_log_dir
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = as_user(format("ls {pid_file} && ps -p `cat {pid_file}`"), user=usr)
-
-    # Remove the pid file if its corresponding process is not running.
-    File(pid_file, action = "delete", not_if = check_process)
-
-    if componentName == 'timelineserver' and serviceName == 'yarn':
-      File(params.ats_leveldb_lock_file,
-         action = "delete",
-         only_if = format("ls {params.ats_leveldb_lock_file}"),
-         not_if = check_process,
-         ignore_failures = True
-      )
-
-    try:
-      # Attempt to start the process. Internally, this is skipped if the process is already running.
-      Execute(daemon_cmd, user = usr, not_if = check_process)
-  
-      # Ensure that the process with the expected PID exists.
-      Execute(check_process,
-              not_if = check_process,
-              tries=5,
-              try_sleep=1,
-      )
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {componentName}")
-    try:
-      Execute(daemon_cmd, user=usr)
-    except:
-      show_logs(log_dir, usr)
-      raise
-
-    # !!! yarn-daemon doesn't need us to delete PIDs
-    if delete_pid_file is True:
-      File(pid_file, action="delete")
-
-
-  elif action == 'refreshQueues':
-    rm_kinit_cmd = params.rm_kinit_cmd
-    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-    Execute(refresh_cmd, user=usr)
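
The default-OS service() implementation above leans on a single guard, check_process, built as "ls {pid_file} && ps -p `cat {pid_file}`": it succeeds only when the PID file exists and the recorded process is alive, and it is used both to skip a redundant start and to clear a stale PID file beforehand. A small standalone sketch of that guard follows; the PID file path and start command are placeholders, since the real values are formatted from yarn_pid_dir, yarn_user and the daemon scripts.

    # Sketch of the PID-file liveness guard; paths and commands are placeholders.
    import os
    import subprocess

    def process_is_running(pid_file):
        """True when pid_file exists and the PID it records maps to a live process."""
        if not os.path.isfile(pid_file):
            return False
        with open(pid_file) as fh:
            pid = fh.read().strip()
        return subprocess.call(["ps", "-p", pid]) == 0  # ps -p exits 0 only if the process exists

    def ensure_started(pid_file, start_cmd):
        if process_is_running(pid_file):
            return                        # like Execute(daemon_cmd, not_if=check_process)
        if os.path.isfile(pid_file):
            os.remove(pid_file)           # like File(pid_file, action="delete", not_if=check_process)
        subprocess.check_call(start_cmd, shell=True)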

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
deleted file mode 100755
index daa8e7e..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
+++ /dev/null
@@ -1,159 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import re
-import subprocess
-from ambari_commons import os_utils
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-
-CURL_CONNECTION_TIMEOUT = '5'
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class ServiceCheckWindows(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
-
-    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
-    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
-    validateStatusFileName = "validateYarnComponentStatusWindows.py"
-    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
-    python_executable = sys.executable
-    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
-
-    if params.security_enabled:
-      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
-      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName)
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd, logoutput=True)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class ServiceCheckDefault(ServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    params.HdfsResource(format("/user/{smokeuser}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.smokeuser,
-                        mode=params.smoke_hdfs_user_mode,
-                        )
-
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
-    else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
-    yarn_distrubuted_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
-                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
-                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
-                                           "--queue", "{service_check_queue_name}"]
-    yarn_distrubuted_shell_check_cmd = format(" ".join(yarn_distrubuted_shell_check_params))
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
-      smoke_cmd = format("{kinit_cmd} {yarn_distrubuted_shell_check_cmd}")
-    else:
-      smoke_cmd = yarn_distrubuted_shell_check_cmd
-
-    return_code, out = shell.checked_call(smoke_cmd,
-                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                          user=params.smokeuser,
-                                          )
-
-    m = re.search("appTrackingUrl=(.*),\s", out)
-    app_url = m.group(1)
-
-    splitted_app_url = str(app_url).split('/')
-
-    for item in splitted_app_url:
-      if "application" in item:
-        application_name = item
-
-    for rm_webapp_address in params.rm_webapp_addresses_list:
-      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
-
-      get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
-
-      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                            user=params.smokeuser,
-                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                            )
-      
-      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
-      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
-        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
-        continue
-
-      try:
-        json_response = json.loads(stdout)
-      except Exception as e:
-        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-      
-      if json_response is None or 'app' not in json_response or \
-              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-        raise Fail("Application " + app_url + " returns invalid data.")
-
-      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
-
-
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
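
The default service check above submits a distributed-shell job, pulls the application id out of appTrackingUrl, and then asks each ResourceManager for /ws/v1/cluster/apps/<application-id>, accepting the run only when state is FINISHED and finalStatus is SUCCEEDED. A minimal sketch of that last verification step follows; it assumes an unsecured RM web address reachable over plain HTTP, whereas the script above uses curl --negotiate and iterates over rm_webapp_addresses_list, skipping standby ResourceManagers.

    # Sketch of the final RM REST verification; the RM address is a placeholder.
    import json
    import urllib2

    def application_succeeded(rm_webapp_address, application_id, scheme="http"):
        url = "%s://%s/ws/v1/cluster/apps/%s" % (scheme, rm_webapp_address, application_id)
        payload = json.load(urllib2.urlopen(url, timeout=5))
        app = payload.get("app") or {}
        return app.get("state") == "FINISHED" and app.get("finalStatus") == "SUCCEEDED"

    # Example (hypothetical values):
    # application_succeeded("rm.example.com:8088", "application_1489000000000_0001")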

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
deleted file mode 100755
index 6ea7f82..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from resource_management.core.logger import Logger
-
-def setup_ranger_yarn():
-  import params
-
-  if params.has_ranger_admin:
-
-    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-
-    if params.retryAble:
-      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
-    else:
-      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True
-      )
-      params.HdfsResource("/ranger/audit/yarn",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.yarn_user,
-                         group=params.yarn_user,
-                         mode=0700,
-                         recursive_chmod=True
-      )
-      params.HdfsResource(None, action="execute")
-
-    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
-                        params.downloaded_custom_connector, params.driver_curl_source,
-                        params.driver_curl_target, params.java64_home,
-                        params.repo_name, params.yarn_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
-                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
-                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
-                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
-                        is_security_enabled = params.security_enabled,
-                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
-                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
-                        component_user_keytab=params.rm_keytab if params.security_enabled else None
-      )
-  else:
-    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
deleted file mode 100755
index c2e9d92..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries import functions
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from ambari_commons import OSCheck
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-if OSCheck.is_windows_family():
-  resourcemanager_win_service_name = 'resourcemanager'
-  nodemanager_win_service_name = 'nodemanager'
-  historyserver_win_service_name = 'historyserver'
-  timelineserver_win_service_name = 'timelineserver'
-
-  service_map = {
-    'resourcemanager' : resourcemanager_win_service_name,
-    'nodemanager' : nodemanager_win_service_name,
-    'historyserver' : historyserver_win_service_name,
-    'timelineserver' : timelineserver_win_service_name
-  }
-else:
-  mapred_user = config['configurations']['mapred-env']['mapred_user']
-  yarn_user = config['configurations']['yarn-env']['yarn_user']
-  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
-
-  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
-
-  hostname = config['hostname']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
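
Everything in status_params.py above is derived from a handful of *-env settings: the pid_dir_prefix and user values are substituted into path templates by format(). With typical defaults (a yarn_pid_dir_prefix of /var/run/hadoop-yarn and a yarn_user of "yarn", assumed here purely for illustration), the templates expand as in the sketch below.

    # Illustration of how the PID-file templates expand; the prefix and user
    # are assumed defaults, not values taken from this patch.
    yarn_pid_dir_prefix = "/var/run/hadoop-yarn"
    yarn_user = "yarn"

    yarn_pid_dir = "{0}/{1}".format(yarn_pid_dir_prefix, yarn_user)
    resourcemanager_pid_file = "{0}/yarn-{1}-resourcemanager.pid".format(yarn_pid_dir, yarn_user)
    yarn_historyserver_pid_file = "{0}/yarn-{1}-timelineserver.pid".format(yarn_pid_dir, yarn_user)

    print(resourcemanager_pid_file)      # /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid
    print(yarn_historyserver_pid_file)   # /var/run/hadoop-yarn/yarn/yarn-yarn-timelineserver.pid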


[07/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
Working around ODPI-186

(cherry picked from commit 2e21565ab7642379bf579c0fea3aa74e071aaf9a)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/490bcb65
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/490bcb65
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/490bcb65

Branch: refs/heads/master
Commit: 490bcb657b838ed8f328f9f4699d268807b6d8c4
Parents: b1d707c
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Tue Sep 13 18:47:50 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:07 2017 -0700

----------------------------------------------------------------------
 .../ambari/ODPi/1.0/role_command_order.json     |   43 +-
 .../1.0/services/YARN/MAPREDUCE2_metrics.json   | 2596 +++++++++++++
 .../ODPi/1.0/services/YARN/YARN_metrics.json    | 3486 ++++++++++++++++++
 .../ODPi/1.0/services/YARN/YARN_widgets.json    |  611 +++
 .../ambari/ODPi/1.0/services/YARN/alerts.json   |  418 +++
 .../YARN/configuration-mapred/mapred-env.xml    |  105 +
 .../YARN/configuration-mapred/mapred-site.xml   |  481 +++
 .../YARN/configuration/capacity-scheduler.xml   |  130 +
 .../services/YARN/configuration/yarn-env.xml    |  260 ++
 .../services/YARN/configuration/yarn-log4j.xml  |   94 +
 .../services/YARN/configuration/yarn-site.xml   |  424 +++
 .../ambari/ODPi/1.0/services/YARN/kerberos.json |  214 ++
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |  274 +-
 .../ambari/ODPi/1.0/services/YARN/package/.hash |    1 +
 .../package/alerts/alert_nodemanager_health.py  |  209 ++
 .../alerts/alert_nodemanagers_summary.py        |  219 ++
 .../files/validateYarnComponentStatusWindows.py |  161 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |  155 +
 .../YARN/package/scripts/historyserver.py       |  190 +
 .../YARN/package/scripts/install_jars.py        |   99 +
 .../package/scripts/mapred_service_check.py     |  168 +
 .../YARN/package/scripts/mapreduce2_client.py   |   98 +
 .../YARN/package/scripts/nodemanager.py         |  161 +
 .../YARN/package/scripts/nodemanager_upgrade.py |   73 +
 .../1.0/services/YARN/package/scripts/params.py |   31 +
 .../YARN/package/scripts/params_linux.py        |  469 +++
 .../YARN/package/scripts/params_windows.py      |   59 +
 .../YARN/package/scripts/resourcemanager.py     |  289 ++
 .../services/YARN/package/scripts/service.py    |  105 +
 .../YARN/package/scripts/service_check.py       |  159 +
 .../YARN/package/scripts/setup_ranger_yarn.py   |   71 +
 .../YARN/package/scripts/status_params.py       |   61 +
 .../1.0/services/YARN/package/scripts/yarn.py   |  499 +++
 .../YARN/package/scripts/yarn_client.py         |   67 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../YARN/package/templates/mapreduce.conf.j2    |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/package/templates/yarn.conf.j2         |   35 +
 40 files changed, 12662 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
index ab56c7f..31f26e3 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
@@ -3,39 +3,72 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+    "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START"],
+    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART", "NAMENODE-RESTART"],
+    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "HIVE_SERVER-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART", "ZOOKEEPER_SERVER-RESTART"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"]
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP", "METRICS_COLLECTOR-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"]
   },
   "_comment" : "GLUSTERFS-specific dependencies",
   "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
     "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {
+    "METRICS_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START", "ZOOKEEPER_SERVER-START"],
+    "AMBARI_METRICS_SERVICE_CHECK-SERVICE_CHECK": ["METRICS_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],
     "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "SECONDARY_NAMENODE-RESTART": ["NAMENODE-RESTART"],
     "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
     "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
     "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "HISTORYSERVER-RESTART": ["NAMENODE-RESTART"],
+    "RESOURCEMANAGER-RESTART": ["NAMENODE-RESTART"],
+    "NODEMANAGER-RESTART": ["NAMENODE-RESTART"],
+    "OOZIE_SERVER-RESTART": ["NAMENODE-RESTART"],
     "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
         "SECONDARY_NAMENODE-START"],
     "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
         "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
     "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP"],
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "METRICS_COLLECTOR-STOP"],
     "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP"]
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "METRICS_GRAFANA-START": ["METRICS_COLLECTOR-START"],
+    "METRICS_COLLECTOR-STOP": ["METRICS_GRAFANA-STOP"]
   },
   "_comment" : "Dependencies that are used in HA NameNode cluster",
   "namenode_optional_ha": {
     "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
+    "ZKFC-STOP": ["NAMENODE-STOP"],
+    "JOURNALNODE-STOP": ["NAMENODE-STOP"]
   },
   "_comment" : "Dependencies that are used in ResourceManager HA cluster",
   "resourcemanager_optional_ha" : {
     "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
-
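
The hunk above grows role_command_order.json with HBase, Hive, Oozie and Ambari Metrics orderings. Per the _comment kept in the file, every key is a blockedRole-blockedCommand and its value lists the blockerRole-blockerCommand entries that must finish first. A tiny, purely illustrative consumer of that map is sketched below (Ambari's own command scheduler interprets the file internally; the local file path is assumed).

    # Illustrative reader of role_command_order.json: a blocked command is
    # runnable once all of its blockers have completed.
    import json

    def runnable(command, completed, deps):
        """command is e.g. 'HIVE_SERVER-START'; completed is a set of finished commands."""
        return all(blocker in completed for blocker in deps.get(command, []))

    with open("role_command_order.json") as fh:   # assumed local path
        order = json.load(fh)
    deps = dict((k, v) for k, v in order["general_deps"].items() if not k.startswith("_comment"))
    print(runnable("HIVE_SERVER-START", {"NODEMANAGER-START", "MYSQL_SERVER-START"}, deps))  # True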


[26/52] bigtop git commit: Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
deleted file mode 100755
index 346baa9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<configuration><property require-input="false">
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
-    <display-name>Policy user for HIVE</display-name>
-    <filename>ranger-hive-plugin-properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>jdbc.driverClassName</name>
-    <value>org.apache.hive.jdbc.HiveDriver</value>
-    <description>Used for repository creation on ranger admin</description>
-    <filename>ranger-hive-plugin-properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>common.name.for.certificate</name>
-    <value></value>
-    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
-    <filename>ranger-hive-plugin-properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>hive</value>
-    <description>Used for repository creation on ranger admin</description>
-    <display-name>Ranger repository config user</display-name>
-    <filename>ranger-hive-plugin-properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>hive</value>
-    <description>Used for repository creation on ranger admin</description>
-    <display-name>Ranger repository config password</display-name>
-    <filename>ranger-hive-plugin-properties.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type>PASSWORD</property-type>
-    <value-attributes>
-        <type>password</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
deleted file mode 100755
index bf73d9f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<configuration><property require-input="false">
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <description>password for keystore</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type>PASSWORD</property-type>
-    <value-attributes>
-        <type>password</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <description>java truststore password</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type>PASSWORD</property-type>
-    <value-attributes>
-        <type>password</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <filename>ranger-hive-policymgr-ssl.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
deleted file mode 100755
index a29780f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<configuration><property require-input="false">
-    <name>ranger.plugin.hive.policy.rest.ssl.config.file</name>
-    <value>/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ranger.plugin.hive.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing policies for this HIVE instance</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ranger.plugin.hive.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ranger.plugin.hive.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ranger.plugin.hive.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>ranger.plugin.hive.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>xasecure.hive.update.xapolicies.on.grant.revoke</name>
-    <value>true</value>
-    <description>Should Hive plugin update Ranger policies for updates to permissions done using GRANT/REVOKE?</description>
-    <display-name>Should Hive GRANT/REVOKE update XA policies</display-name>
-    <filename>ranger-hive-security.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
deleted file mode 100755
index 3865c36..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
+++ /dev/null
@@ -1,144 +0,0 @@
-<configuration><property require-input="false">
-    <name>tez.runtime.shuffle.fetch.buffer.percent</name>
-    <value>0.6</value>
-    <description>Fraction (0-1) of the available memory which can be used to
-      retain shuffled data</description>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.runtime.shuffle.memory.limit.percent</name>
-    <value>0.25</value>
-    <description>This property determines the maximum size of a shuffle segment
-      which can be fetched to memory. Fraction (0-1) of shuffle memory
-      (after applying tez.runtime.shuffle.fetch.buffer.percent)</description>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.runtime.report.partition.stats</name>
-    <value>true</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.runtime.pipelined-shuffle.enabled</name>
-    <value>false</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.runtime.pipelined.sorter.lazy-allocate.memory</name>
-    <value>true</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.grouping.node.local.only</name>
-    <value>true</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.runtime.shuffle.fetch.verify-disk-checksum</name>
-    <value>false</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.lib.uris</name>
-    <value>/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz</value>
-    <description>Comma-delimited list of the location of the Tez libraries which will be localized for DAGs.
-      Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
-      If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
-    </description>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.dag.recovery.enabled</name>
-    <value>false</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.am.resource.memory.mb</name>
-    <value>1536</value>
-    <description>The amount of memory to be used by the AppMaster</description>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-        <property>
-            <name>hive.llap.daemon.queue.name</name>
-            <type>hive-interactive-site</type>
-        </property>
-        <property>
-            <name>llap_queue_capacity</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>enable_hive_interactive</name>
-            <type>hive-interactive-env</type>
-        </property>
-        <property>
-            <name>hive.server2.tez.sessions.per.default.queue</name>
-            <type>hive-interactive-site</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>tez.session.am.dag.submit.timeout.secs</name>
-    <value>3600</value>
-    <filename>tez-interactive-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
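
Of the two shuffle fractions removed above, tez.runtime.shuffle.memory.limit.percent (0.25) is applied on top of tez.runtime.shuffle.fetch.buffer.percent (0.6), as its own description states. A short worked example follows; the 4096 MB of available task memory is an assumption for illustration, not a value from this patch.

    # Worked example of how the two shuffle fractions combine.
    available_task_memory_mb = 4096          # assumed, for illustration only
    fetch_buffer_percent = 0.6               # tez.runtime.shuffle.fetch.buffer.percent
    memory_limit_percent = 0.25              # tez.runtime.shuffle.memory.limit.percent

    shuffle_buffer_mb = available_task_memory_mb * fetch_buffer_percent      # memory retained for shuffled data
    max_in_memory_segment_mb = shuffle_buffer_mb * memory_limit_percent      # largest segment fetched to memory

    print("%.1f MB buffer, %.1f MB max in-memory segment" % (shuffle_buffer_mb, max_in_memory_segment_mb))
    # 2457.6 MB buffer, 614.4 MB max in-memory segment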

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
index d6ecbed..9e73118 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
@@ -3,7 +3,7 @@
   <services><service>
     <name>HIVE</name>
     <displayName>Hive</displayName>
-    <version>1.2.1</version>
+    <version>1.2.1+odpi</version>
     <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
     <components>
         <component>
@@ -12,7 +12,7 @@
             <category>CLIENT</category>
             <deleted>false</deleted>
             <cardinality>0+</cardinality>
-            <versionAdvertised>true</versionAdvertised>
+            <versionAdvertised>false</versionAdvertised>
             <versionAdvertisedInternal>false</versionAdvertisedInternal>
             <commandScript>
                 <script>scripts/hcat_client.py</script>
@@ -37,7 +37,7 @@
             <category>MASTER</category>
             <deleted>false</deleted>
             <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
             <commandScript>
                 <script>scripts/hive_server.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -108,7 +108,7 @@
             <category>CLIENT</category>
             <deleted>false</deleted>
             <cardinality>1+</cardinality>
-            <versionAdvertised>true</versionAdvertised>
+            <versionAdvertised>false</versionAdvertised>
             <versionAdvertisedInternal>false</versionAdvertisedInternal>
             <commandScript>
                 <script>scripts/hive_client.py</script>
@@ -148,7 +148,7 @@
             <category>MASTER</category>
             <deleted>false</deleted>
             <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
             <commandScript>
                 <script>scripts/webhcat_server.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -272,7 +272,7 @@
             <category>MASTER</category>
             <deleted>false</deleted>
             <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
             <commandScript>
                 <script>scripts/hive_metastore.py</script>
                 <scriptType>PYTHON</scriptType>
@@ -295,149 +295,8 @@
             <recovery_enabled>false</recovery_enabled>
             <reassignAllowed>true</reassignAllowed>
         </component>
-        <component>
-            <name>HIVE_SERVER_INTERACTIVE</name>
-            <displayName>HiveServer2 Interactive</displayName>
-            <category>MASTER</category>
-            <deleted>false</deleted>
-            <cardinality>0-1</cardinality>
-            <versionAdvertised>true</versionAdvertised>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/hive_server_interactive.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>0</timeout>
-            </commandScript>
-            <customCommands>
-                <customCommand>
-                    <name>RESTART_LLAP</name>
-                    <commandScript>
-                        <script>scripts/hive_server_interactive.py</script>
-                        <scriptType>PYTHON</scriptType>
-                        <timeout>600</timeout>
-                    </commandScript>
-                    <background>false</background>
-                </customCommand>
-            </customCommands>
-            <dependencies>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-                    <scope>cluster</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>YARN/YARN_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>HDFS/HDFS_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>TEZ/TEZ_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>PIG/PIG</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>SLIDER/SLIDER</name>
-                    <scope>host</scope>
-                </dependency>
-            </dependencies>
-            <configuration-dependencies>
-                <config-type>beeline-log4j2</config-type>
-                <config-type>hive-exec-log4j2</config-type>
-                <config-type>hive-log4j2</config-type>
-                <config-type>hive-site</config-type>
-                <config-type>hive-interactive-site</config-type>
-                <config-type>tez-interactive-site</config-type>
-                <config-type>hiveserver2-interactive-site</config-type>
-                <config-type>hive-interactive-env</config-type>
-                <config-type>llap-cli-log4j2</config-type>
-                <config-type>llap-daemon-log4j</config-type>
-            </configuration-dependencies>
-            <recovery_enabled>false</recovery_enabled>
-        </component>
     </components>
-    <deleted>false</deleted>
     <configuration-dependencies>
-        <config-type>application-properties</config-type>
-        <config-type>hive-atlas-application.properties</config-type>
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
         <config-type>tez-site</config-type>
@@ -445,12 +304,7 @@
         <config-type>hivemetastore-site.xml</config-type>
         <config-type>webhcat-site</config-type>
         <config-type>webhcat-env</config-type>
-        <config-type>ranger-hive-plugin-properties</config-type>
-        <config-type>ranger-hive-audit</config-type>
-        <config-type>ranger-hive-policymgr-ssl</config-type>
-        <config-type>ranger-hive-security</config-type>
         <config-type>mapred-site</config-type>
-        <config-type>application.properties</config-type>
     </configuration-dependencies>
     <widgetsFileName>widgets.json</widgetsFileName>
     <metricsFileName>metrics.json</metricsFileName>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
index 1cd58c3..b33d715 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
@@ -93,12 +93,18 @@ stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_D
 component_directory = status_params.component_directory
 component_directory_interactive = status_params.component_directory_interactive
 
-hadoop_home = format('{stack_root}/current/hadoop-client')
-hive_bin = format('{stack_root}/current/{component_directory}/bin')
-hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
-hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
-hive_lib = format('{stack_root}/current/{component_directory}/lib')
-hive_version_lib = format('{stack_root}/{version}/hive/lib')
+hadoop_home = '/usr/lib/hadoop'
+hive_bin = '/usr/lib/hive/bin'
+hive_schematool_ver_bin = '/usr/lib/hive/bin'
+hive_schematool_bin = '/usr/lib/hive/bin'
+hive_lib = '/usr/lib/hive/lib'
+hive_version_lib = '/usr/lib/hive/lib'
+#hadoop_home = format('{stack_root}/current/hadoop-client')
+#hive_bin = format('{stack_root}/current/{component_directory}/bin')
+#hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
+#hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
+#hive_lib = format('{stack_root}/current/{component_directory}/lib')
+#hive_version_lib = format('{stack_root}/{version}/hive/lib')
 hive_var_lib = '/var/lib/hive'
 hive_user_home_dir = "/home/hive"
 
@@ -167,10 +173,6 @@ config_dir = '/etc/hive-webhcat/conf'
 hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
 webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
 
-# there are no client versions of these, use server versions directly
-hcat_lib = format('{stack_root}/current/hive-webhcat/share/hcatalog')
-webhcat_bin_dir = format('{stack_root}/current/hive-webhcat/sbin')
-
 # --- Tarballs ---
 # DON'T CHANGE THESE VARIABLE NAMES
 # Values don't change from those in copy_tarball.py
@@ -189,17 +191,14 @@ tarballs_mode = 0444
 
 purge_tables = "false"
 # Starting from stack version for feature hive_purge_table drop should be executed with purge
-if check_stack_feature(StackFeature.HIVE_PURGE_TABLE, version_for_stack_feature_checks):
-  purge_tables = 'true'
+purge_tables = 'true'
 
-if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, version_for_stack_feature_checks):
-  # this is NOT a typo.  Configs for hcatalog/webhcat point to a
-  # specific directory which is NOT called 'conf'
-  hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
-  config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
+# this is NOT a typo.  Configs for hcatalog/webhcat point to a
+# specific directory which is NOT called 'conf'
+hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
+config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
 
-if check_stack_feature(StackFeature.HIVE_METASTORE_SITE_SUPPORT, version_for_stack_feature_checks):
-  hive_metastore_site_supported = True
+hive_metastore_site_supported = True
 
 execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
 
@@ -395,10 +394,7 @@ start_metastore_path = format("{tmp_dir}/start_metastore_script")
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  if check_stack_feature(StackFeature.HIVE_ENV_HEAPSIZE, version_for_stack_feature_checks):
-    hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
-  else:
-    hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
+  hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
 else:
   hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
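
The hunk above replaces the stack-rooted lookups (format('{stack_root}/current/...')) with the fixed Bigtop install locations under /usr/lib. A minimal illustrative sketch of the difference, using placeholder values for stack_root and component_directory (both hypothetical, not taken from this patch):

    # Illustrative only -- contrasts the removed stack-versioned layout with
    # the fixed Bigtop/ODPi paths that the patch hardcodes.
    stack_root = '/usr/odpi'             # hypothetical stack root
    component_directory = 'hive-client'  # hypothetical component directory

    # What the removed lines would have computed via format():
    hive_bin_stack = '{0}/current/{1}/bin'.format(stack_root, component_directory)
    print(hive_bin_stack)                # /usr/odpi/current/hive-client/bin

    # What the patch pins instead:
    hive_bin_fixed = '/usr/lib/hive/bin'
    print(hive_bin_fixed)                # /usr/lib/hive/bin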
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
index b7cb148..024f3df 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
@@ -92,10 +92,14 @@ else:
   hive_server_conf_dir = "/etc/hive/conf.server"
   hive_server_interactive_conf_dir = "/etc/hive2/conf.server"
 
-  webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/conf")
-  hive_home_dir = format("{stack_root}/current/{component_directory}")
-  hive_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-  hive_client_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+#  webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/conf")
+#  hive_home_dir = format("{stack_root}/current/{component_directory}")
+#  hive_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+#  hive_client_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+  webhcat_conf_dir = '/etc/hive/conf'
+  hive_home_dir = '/usr/lib/hive'
+  hive_conf_dir = '/usr/lib/hive/conf'
+  hive_client_conf_dir = '/etc/hive/conf'
 
   if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version_formatted_major):
     hive_server_conf_dir = format("{stack_root}/current/{component_directory}/conf/conf.server")
@@ -115,4 +119,4 @@ else:
   if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE", "HIVE_SERVER_INTERACTIVE"]:
     hive_config_dir = hive_server_conf_dir
     
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
+stack_name = default("/hostLevelParams/stack_name", None)
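
status_params.py gets the same treatment: the client and server configuration directories are pinned rather than derived from the stack root. A quick sanity check (illustrative only, not part of the patch) that the pinned directories exist on a host provisioned from Bigtop packages:

    # Illustrative only -- verify the hardcoded Hive locations from the hunk above.
    import os

    expected_dirs = [
        '/etc/hive/conf',      # webhcat_conf_dir / hive_client_conf_dir
        '/usr/lib/hive',       # hive_home_dir
        '/usr/lib/hive/conf',  # hive_conf_dir
    ]

    for d in expected_dirs:
        print('{0}: {1}'.format(d, 'present' if os.path.isdir(d) else 'MISSING'))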

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
index b20114c..59ff82b 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
@@ -19,13 +19,6 @@
 <!-- Put site-specific property overrides in this file. -->
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
   <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/tmp/dummy</value>
-    <description>This is a temporary workaround for ODPI-186</description>
-  </property>
-
-  <!-- ResourceManager -->
-  <property>
     <name>yarn.resourcemanager.hostname</name>
     <value>localhost</value>
     <description>The hostname of the RM.</description>
@@ -113,25 +106,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description> Are acls enabled. </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.admin.acl</name>
-    <value/>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- NodeManager -->
-  <property>
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
@@ -421,4 +395,185 @@
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/var/log/hadoop-yarn/timeline</value>
+    <description>
+      Store file name for leveldb timeline store
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is default address for the timeline server to start
+      the RPC server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <description>Enable age off of timeline store data.</description>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
+    <value>true</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled on the YARN cluster.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.authorization-provider</name>
+    <description> Yarn authorization provider class. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!--ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
+    <description>Main storage class for YARN timeline server.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
+    <value>/ats/active/</value>
+    <description>DFS path to store active application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
+    <value>/ats/done/</value>
+    <description>DFS path to store done application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
+    <value/>
+    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- advanced ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
+    <!-- Use rolling leveldb, advanced -->
+    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage reader. This
+      value controls how frequently the reader scans the HDFS active directory
+      for application status.
+    </description>
+    <!-- Default is 60 seconds, advanced -->
+    <value>60</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
+      value controls how frequently the cleaner scans the HDFS done directory
+      for stale application data.
+    </description>
+    <!-- 3600 is default, advanced -->
+    <value>3600</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
+    <!-- 7 days is default, advanced -->
+    <value>604800</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>
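
The yarn-site.xml change moves the leveldb timeline store back to its usual location under /var/log/hadoop-yarn and adds the Application Timeline Service (ATS) v1.5 properties. A small illustrative reader (not part of the patch; the config path is an assumption) that lists the timeline-service settings from a rendered yarn-site.xml:

    # Illustrative only -- dump the yarn.timeline-service.* properties from a
    # rendered yarn-site.xml.  The path below is an assumption.
    import xml.etree.ElementTree as ET

    site = '/etc/hadoop/conf/yarn-site.xml'
    root = ET.parse(site).getroot()
    for prop in root.findall('property'):
        name = prop.findtext('name', default='')
        if name.startswith('yarn.timeline-service.'):
            print('{0} = {1}'.format(name, prop.findtext('value', default='')))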

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b6475d77/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
index 18117f3..35da7fd 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
@@ -26,6 +26,20 @@
       <version>2.7.1+odpi</version>
       <components>
 
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+
         <component>
           <name>RESOURCEMANAGER</name>
           <displayName>ResourceManager</displayName>
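
The metainfo.xml addition registers APP_TIMELINE_SERVER as a MASTER component driven by scripts/application_timeline_server.py. That script is not included in this mail; the sketch below only illustrates the usual Ambari command-script shape (a Script subclass with install/start/stop/status methods and an execute() entry point), under the assumption that the real file follows the standard pattern:

    # Minimal sketch, assuming the standard Ambari Script pattern; this is NOT
    # the application_timeline_server.py added by the patch.
    from resource_management.libraries.script.script import Script

    class ApplicationTimelineServer(Script):
      def install(self, env):
        self.install_packages(env)    # standard Ambari package install helper

      def configure(self, env):
        pass                          # would render yarn-site.xml and friends

      def start(self, env):
        self.configure(env)           # configure, then launch the timelineserver daemon

      def stop(self, env):
        pass                          # would stop the daemon via its pid file

      def status(self, env):
        pass                          # would check the daemon's pid file

    if __name__ == "__main__":
      ApplicationTimelineServer().execute()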


[04/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
new file mode 100755
index 0000000..4b76a17
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_widgets.json
@@ -0,0 +1,611 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
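
Each GRAPH widget above pairs a list of metrics with a value expression; for example, the Memory Utilization widget computes AllocatedMB / (AllocatedMB + AvailableMB) * 100 over the root queue. A worked example with made-up sample numbers (illustrative only):

    # Worked example (sample numbers) of the "Memory Utilization" expression:
    # ${(...AllocatedMB / (...AllocatedMB + ...AvailableMB)) * 100}
    allocated_mb = 6144.0   # hypothetical metrics/yarn/Queue/root/AllocatedMB
    available_mb = 2048.0   # hypothetical metrics/yarn/Queue/root/AvailableMB

    memory_utilization = (allocated_mb / (allocated_mb + available_mb)) * 100
    print('Memory Utilization: {0:.1f}%'.format(memory_utilization))  # 75.0%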

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
new file mode 100755
index 0000000..8561922
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/alerts.json
@@ -0,0 +1,418 @@
+{
+  "MAPREDUCE2": {
+    "service": [],
+    "HISTORYSERVER": [
+      {
+        "name": "mapreduce_history_server_webui",
+        "label": "History Server Web UI",
+        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_cpu",
+        "label": "History Server CPU Utilization",
+        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_rpc_latency",
+        "label": "History Server RPC Latency",
+        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },          
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_process",
+        "label": "History Server Process",
+        "description": "This host-level alert is triggered if the History Server process cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+          "default_port": 19888,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  },
+  "YARN": {
+    "service": [
+      {
+        "name": "yarn_nodemanager_webui_percent",
+        "label": "Percent NodeManagers Available",
+        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "yarn_nodemanager_webui",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 10
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 30
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          }
+        }
+      }
+    ],
+    "NODEMANAGER": [
+      {
+        "name": "yarn_nodemanager_webui",
+        "label": "NodeManager Web UI",
+        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "default_port": 8042,
+            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_nodemanager_health",
+        "label": "NodeManager Health",
+        "description": "This host-level alert checks the node health property available from the NodeManager component.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "RESOURCEMANAGER": [
+      {
+        "name": "yarn_resourcemanager_webui",
+        "label": "ResourceManager Web UI",
+        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_cpu",
+        "label": "ResourceManager CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%",
+            "type": "PERCENT"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_rpc_latency",
+        "label": "ResourceManager RPC Latency",
+        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "nodemanager_health_summary",
+        "label": "NodeManager Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "APP_TIMELINE_SERVER": [
+      {
+        "name": "yarn_app_timeline_server_webui",
+        "label": "App Timeline Web UI",
+        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline",
+            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ]
+  }
+}
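
A note on the high_availability blocks above: when ResourceManager HA is enabled,
each id listed under alias_key is substituted for {{alias}} in the http/https
pattern to obtain one candidate web address per ResourceManager. The sketch below
only illustrates that substitution; the helper and the flattened config dict are
assumptions, not Ambari's actual alert framework.

    # Illustrative only: expand an alert's high_availability patterns into
    # concrete webapp addresses, one per rm-id.
    def expand_ha_uris(configs, alias_key, pattern):
        rm_ids = configs[alias_key.strip("{}")].split(",")
        uris = []
        for alias in rm_ids:
            key = pattern.replace("{{alias}}", alias.strip()).strip("{}")
            if key in configs:
                uris.append(configs[key])
        return uris

    print(expand_ha_uris(
        {"yarn-site/yarn.resourcemanager.ha.rm-ids": "rm1,rm2",
         "yarn-site/yarn.resourcemanager.webapp.address.rm1": "rm-host-1:8088",
         "yarn-site/yarn.resourcemanager.webapp.address.rm2": "rm-host-2:8088"},
        "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
        "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}"))
    # -> ['rm-host-1:8088', 'rm-host-2:8088']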

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
new file mode 100755
index 0000000..fe6d4b9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <display-name>Mapreduce Log Dir Prefix</display-name>
+    <description>Mapreduce Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <display-name>Mapreduce PID Dir Prefix</display-name>
+    <description>Mapreduce PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <display-name>Mapreduce User</display-name>
+    <value>mapred</value>
+    <property-type>USER</property-type>
+    <description>Mapreduce User</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>jobhistory_heapsize</name>
+    <display-name>History Server heap size</display-name>
+    <value>900</value>
+    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
+    <value-attributes>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user_nofile_limit</name>
+    <value>32768</value>
+    <description>Max open files limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapred_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- mapred-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>mapred-env template</display-name>
+    <description>This is the jinja template for mapred-env.sh file</description>
+    <value>
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
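
The content property above is a Jinja-style template for mapred-env.sh; Ambari
fills in placeholders such as {{jobhistory_heapsize}} when writing the file. A
rough approximation of the substitution using plain Jinja2 (Ambari uses its own
templating machinery, so this is only an illustration):

    # Rough approximation of the placeholder substitution.
    from jinja2 import Template

    snippet = Template(
        "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
        "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n")
    print(snippet.render(jobhistory_heapsize="900"))
    # export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
    # export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA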

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100755
index 0000000..434eea0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,481 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- i/o properties -->
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>358</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+    <display-name>Sort Allocation Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2047</maximum>
+      <unit>MB</unit>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than .5
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.max</name>
+    <value>130</value>
+    <description>
+      Limit on the number of counters allowed per job.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>512</value>
+    <description>Virtual memory for single Map task</description>
+    <display-name>Map Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+    <display-name>Reduce Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+    <display-name>AppMaster Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx410m</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the resourcemanager; otherwise, it will be overridden. The default is
+      set to 2, to allow at least one retry for the AM.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx410m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+    <display-name>MR Map Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+    <display-name>MR Reduce Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.reduce.memory.mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for JobHistoryServer web UI.
+      The following values are supported: HTTP_ONLY (service is provided only
+      on http) and HTTPS_ONLY (service is provided only on https).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.queuename</name>
+    <value>default</value>
+    <description>
+      Queue to which a job is submitted.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
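
The memory defaults above pair each YARN container size (mapreduce.map.memory.mb,
mapreduce.reduce.memory.mb, yarn.app.mapreduce.am.resource.mb) with a JVM heap
(-Xmx) somewhat below it, leaving headroom for non-heap memory. The sketch below
assumes the common ~0.8 heap-to-container ratio; the ratio is not stated in this
file, and the shipped values (410m for 512 MB, 756m for 1024 MB) do not follow a
single exact factor.

    # Assumption: heap sized to roughly 80% of the container; a common
    # convention, not something defined in mapred-site.xml itself.
    def java_opts_for_container(container_mb, heap_fraction=0.8):
        return "-Xmx{0}m".format(int(container_mb * heap_fraction))

    print(java_opts_for_container(512))   # -Xmx409m (file ships -Xmx410m)
    print(java_opts_for_container(1024))  # -Xmx819m (file ships -Xmx756m)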

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
new file mode 100755
index 0000000..912113b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently running
+      applications.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues' capacities should add up to their parent queue's capacity
+      or less.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue i.e. change sub-queue 
+      allocations.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default it is
+      set to approximately the number of nodes in one rack, which is 40.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>
+      Default minimum queue resource limit depends on the number of users who have submitted applications.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
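
As the yarn.scheduler.capacity.root.capacity description notes, the capacities of
the child queues should add up to at most their parent queue's capacity. A minimal
sketch of that check, following the yarn.scheduler.capacity.<queue-path>.capacity
naming used in this file (the helper itself is hypothetical):

    # Hypothetical check: child queue capacities must not exceed 100 percent.
    def children_within_capacity(props, parent="root"):
        children = props["yarn.scheduler.capacity.%s.queues" % parent].split(",")
        total = sum(
            float(props["yarn.scheduler.capacity.%s.%s.capacity" % (parent, c.strip())])
            for c in children)
        return total <= 100.0

    print(children_within_capacity({
        "yarn.scheduler.capacity.root.queues": "default",
        "yarn.scheduler.capacity.root.default.capacity": "100",
    }))  # True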

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
new file mode 100755
index 0000000..c3bbcb6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,260 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <display-name>YARN Log Dir Prefix</display-name>
+    <description>YARN Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <display-name>YARN PID Dir Prefix</display-name>
+    <description>YARN PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <display-name>Yarn User</display-name>
+    <value>yarn</value>
+    <property-type>USER</property-type>
+    <description>YARN User</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <display-name>YARN Java heap size</display-name>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>ResourceManager Java heap size</display-name>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>NodeManager Java heap size</display-name>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>min_user_id</name>
+    <value>1000</value>
+    <display-name>Minimum user ID for submitting job</display-name>
+    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>is_supported_yarn_ranger</name>
+    <value>false</value>
+    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nofile_limit</name>
+    <value>32768</value>
+    <description>Max open files limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>yarn-env template</display-name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max Heapsize for the HistoryServer using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>service_check.queue.name</name>
+    <value>default</value>
+    <description>
+      The queue used by the service check.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
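
The yarn-env template above exports the per-daemon heap sizes (YARN_RESOURCEMANAGER_HEAPSIZE,
YARN_NODEMANAGER_HEAPSIZE, YARN_HISTORYSERVER_HEAPSIZE) alongside the generic
YARN_HEAPSIZE and the 1000 MB JAVA_HEAP_MAX fallback; per the comments in the
template, an explicit -Xmx in the corresponding *_OPTS variable wins over all of
them. A small Python transcription of that precedence, as an illustration only
(the real logic lives in the YARN launcher scripts):

    # Illustration of the heap-size precedence described in the template comments.
    def resolve_heap_flag(xmx_in_opts=None, daemon_heapsize=None, yarn_heapsize=None):
        if xmx_in_opts:                  # e.g. "-Xmx2048m" set via YARN_RESOURCEMANAGER_OPTS
            return xmx_in_opts
        heap_mb = daemon_heapsize or yarn_heapsize or 1000
        return "-Xmx%dm" % int(heap_mb)

    print(resolve_heap_flag())                                          # -Xmx1000m
    print(resolve_heap_flag(yarn_heapsize=1024))                        # -Xmx1024m
    print(resolve_heap_flag(yarn_heapsize=1024, daemon_heapsize=2048))  # -Xmx2048m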

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
new file mode 100755
index 0000000..89dd52d
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-log4j.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>yarn-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>


[25/52] bigtop git commit: Switching ODPi stack repos to trunk

Posted by rv...@apache.org.
Switching ODPi stack repos to trunk

(cherry picked from commit 04b120c22512a21716fe0d1baa9cfd497eb0d8e1)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/aab5273f
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/aab5273f
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/aab5273f

Branch: refs/heads/master
Commit: aab5273f960f261bcc9f1ddaba233275ac48bbbb
Parents: b6475d7
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Wed Oct 26 17:48:52 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:09 2017 -0700

----------------------------------------------------------------------
 .../src/common/ambari/ODPi/1.0/repos/repoinfo.xml            | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/aab5273f/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
index ab4f25f..60eae65 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
@@ -18,15 +18,15 @@
 <reposinfo>
   <os family="redhat6">
     <repo>
-      <baseurl>http://repo.odpi.org/ODPi/1.0/centos-6/</baseurl>
-      <repoid>ODPi-1.0</repoid>
+      <baseurl>http://repo.odpi.org/ODPi/trunk/centos-6/</baseurl>
+      <repoid>ODPi-trunk</repoid>
       <reponame>ODPi</reponame>
     </repo>
   </os>
   <os family="ubuntu14">
     <repo>
-      <baseurl>http://repo.odpi.org/ODPi/1.0/ubuntu-14.04/apt</baseurl>
-      <repoid>ODPi-1.0</repoid>
+      <baseurl>http://repo.odpi.org/ODPi/trunk/ubuntu-14.04/apt</baseurl>
+      <repoid>ODPi-trunk</repoid>
       <reponame>odpi</reponame>
     </repo>
   </os>
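
A quick way to see the effect of this change is to parse repoinfo.xml and print
the repo id and base URL per OS family (the path below assumes a Bigtop checkout):

    # List the configured repos per OS family.
    import xml.etree.ElementTree as ET

    tree = ET.parse("bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml")
    for os_el in tree.getroot().findall("os"):
        for repo in os_el.findall("repo"):
            print(os_el.get("family"), repo.findtext("repoid"), repo.findtext("baseurl"))
    # redhat6 ODPi-trunk http://repo.odpi.org/ODPi/trunk/centos-6/
    # ubuntu14 ODPi-trunk http://repo.odpi.org/ODPi/trunk/ubuntu-14.04/apt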


[48/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
deleted file mode 100755
index 75be7f2..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<configuration><property require-input="false">
-    <name>hive.metastore.metrics.enabled</name>
-    <value>true</value>
-    <filename>hivemetastore-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
-    <filename>hivemetastore-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.hadoop2.component</name>
-    <value>hivemetastore</value>
-    <filename>hivemetastore-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hivemetastore-report.json</value>
-    <filename>hivemetastore-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
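
The hivemetastore-site.xml removed above had enabled metastore metrics with a
JSON_FILE reporter writing to /var/log/hive/hivemetastore-report.json. For
reference, that report is plain JSON and can be inspected along these lines
(assuming the reporter was active and the file exists):

    # Assumes the JSON_FILE reporter has produced the report at the configured path.
    import json

    with open("/var/log/hive/hivemetastore-report.json") as fh:
        report = json.load(fh)
    print(sorted(report.keys()))  # e.g. metric groups such as "gauges", "timers"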

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
deleted file mode 100755
index e78f176..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<configuration><property require-input="false">
-    <name>hive.metastore.metrics.enabled</name>
-    <value>true</value>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>boolean</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.hadoop2.component</name>
-    <value>hiveserver2</value>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hiveserver2-report.json</value>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the Hive client authorization</description>
-    <display-name>Enable Authorization</display-name>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.conf.restricted.list</name>
-    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
-    <description></description>
-    <filename>hiveserver2-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>hive_security_authorization</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
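
An editorial illustration, not part of the patch above: the removed hiveserver2-site.xml descriptor ties together the three SQL-standard authorization settings (the authorizer factory, the session-state authenticator, and the enable flag). The short Python sketch below simply renders those three name/value pairs, exactly as they appear in the deleted descriptor, into a Hadoop-style *-site.xml file; the output filename is a placeholder chosen for the example.

import xml.etree.ElementTree as ET

# Values copied from the deleted hiveserver2-site.xml descriptor above.
hiveserver2_site = {
    "hive.security.authorization.enabled": "false",
    "hive.security.authorization.manager":
        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
    "hive.security.authenticator.manager":
        "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
}

root = ET.Element("configuration")
for name, value in hiveserver2_site.items():
    prop = ET.SubElement(root, "property")
    ET.SubElement(prop, "name").text = name
    ET.SubElement(prop, "value").text = value

# Produces <configuration><property><name>...</name><value>...</value></property>...</configuration>
ET.ElementTree(root).write("hiveserver2-site.xml", encoding="utf-8", xml_declaration=True)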

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
deleted file mode 100755
index fa62c78..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# The file containing the running pid
-PID_FILE={{webhcat_pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-    </value>
-    <description>webhcat-env.sh content</description>
-    <display-name>webhcat-env template</display-name>
-    <filename>webhcat-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
deleted file mode 100755
index 541b1c9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Define some default values that can be overridden by system properties
-webhcat.root.logger = INFO, standard
-webhcat.log.dir = .
-webhcat.log.file = webhcat.log
-
-log4j.rootLogger = ${webhcat.root.logger}
-
-# Logging Threshold
-log4j.threshhold = DEBUG
-
-log4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender
-log4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern = .yyyy-MM-dd
-
-log4j.appender.DRFA.layout = org.apache.log4j.PatternLayout
-
-log4j.appender.standard.layout = org.apache.log4j.PatternLayout
-log4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n
-
-# Class logging settings
-log4j.logger.com.sun.jersey = DEBUG
-log4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR
-log4j.logger.org.apache.hadoop = INFO
-log4j.logger.org.apache.hadoop.conf = WARN
-log4j.logger.org.apache.zookeeper = WARN
-log4j.logger.org.eclipse.jetty = INFO
-
-    </value>
-    <description>Custom webhcat-log4j.properties</description>
-    <display-name>webhcat-log4j template</display-name>
-    <filename>webhcat-log4j.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-        <show-property-name>false</show-property-name>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
deleted file mode 100755
index 680ddc8..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,287 +0,0 @@
-<configuration><property require-input="false">
-    <name>templeton.libjars</name>
-    <value>/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar</value>
-    <description>Jars to add to the classpath.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hive.extra.files</name>
-    <value>/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib</value>
-    <description>The resources in this list will be localized to the node running LaunchMapper and added to HADOOP_CLASSPATH
-      before launching the 'hive' command.  If the path /foo/bar is a directory, the contents of the entire dir will be localized
-      and ./foo/* will be added to HADOOP_CLASSPATH.  Note that since classpath processing does not recurse into subdirectories,
-      the paths in this property may overlap.  In the example above, "./tez-site.xml:./tez-client/*:./lib/*" will be added to
-      HADOOP_CLASSPATH.
-      This can be used to specify config files, Tez artifacts, etc.  The list is passed to the -files option of the hadoop jar command, so
-      each path is interpreted by the Generic Options Parser.  Each path can be local or an HDFS path.
-    </description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.jar</name>
-    <value>/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar</value>
-    <description>The path to the Templeton jar file.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hadoop</name>
-    <value>/usr/hdp/${hdp.version}/hadoop/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz</value>
-    <description>The path to the Pig archive in HDFS.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hcat</name>
-    <value>/usr/hdp/${hdp.version}/hive/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hive.home</name>
-    <value>hive.tar.gz/hive</value>
-    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hcat.home</name>
-    <value>hive.tar.gz/hive/hcatalog</value>
-    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.sqoop.archive</name>
-    <value>hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz</value>
-    <description>The path to the Sqoop archive in HDFS.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.sqoop.path</name>
-    <value>sqoop.tar.gz/sqoop/bin/sqoop</value>
-    <description>The path to the Sqoop executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.sqoop.home</name>
-    <value>sqoop.tar.gz/sqoop</value>
-    <description>The path to the Sqoop home within the tar. Has no effect if
-      templeton.sqoop.archive is not set.
-    </description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.python</name>
-    <value>${env.PYTHON_CMD}</value>
-    <description>The path to the Python executable.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9083,hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>multiLine</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>Enable the override path in templeton.override.jars</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>templeton.hadoop.queue.name</name>
-    <value>default</value>
-    <description>
-      MapReduce queue name where WebHCat map-only jobs will be submitted to. Can be used to avoid a deadlock where all map slots in the cluster are taken over by Templeton launcher tasks.
-    </description>
-    <filename>webhcat-site.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="false" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on>
-        <property>
-            <name>yarn.scheduler.capacity.root.queues</name>
-            <type>capacity-scheduler</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file
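
A side note for readers of the removed webhcat-site.xml above, with a hypothetical sketch rather than code from this commit: many of its values (templeton.jar, templeton.hadoop, the pig/hive/sqoop archive paths, templeton.streaming.jar) embed a ${hdp.version} token that is typically resolved against the installed stack version at deploy time. The helper below shows one way such a token could be expanded; the version string "2.3.0.0-2557" is an invented example, not a value taken from this patch.

import re

def expand_hdp_version(value, hdp_version):
    """Replace every ${hdp.version} token in a property value."""
    return re.sub(r"\$\{hdp\.version\}", hdp_version, value)

# Property value copied from the deleted webhcat-site.xml above.
templeton_hadoop = "/usr/hdp/${hdp.version}/hadoop/bin/hadoop"
print(expand_hdp_version(templeton_hadoop, "2.3.0.0-2557"))
# -> /usr/hdp/2.3.0.0-2557/hadoop/bin/hadoop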

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
deleted file mode 100755
index b0415b1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
+++ /dev/null
@@ -1,777 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31
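
For context only (not part of this patch): the deleted file above is the stock Hive 0.12.0 metastore DDL for MySQL, ending with the INSERT that records schema version '0.12.0' in the VERSION table. A minimal sketch of loading it by hand follows; the database name "hive" and account "hiveuser" are assumed placeholders, and the mysql client will prompt for the password.

import subprocess

SCHEMA = "hive-schema-0.12.0.mysql.sql"
DB = "hive"          # assumed metastore database name
USER = "hiveuser"    # assumed MySQL account

# Equivalent to: mysql -u hiveuser -p hive < hive-schema-0.12.0.mysql.sql
with open(SCHEMA) as ddl:
    subprocess.run(["mysql", "-u", USER, "-p", DB], stdin=ddl, check=True)

# Sanity check: read back the row inserted at the end of the script.
subprocess.run(
    ["mysql", "-u", USER, "-p", DB, "-e",
     "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION"],
    check=True,
)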

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
deleted file mode 100755
index 812b897..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
+++ /dev/null
@@ -1,718 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
-    TYPE_NAME VARCHAR2(4000) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(128) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL, 
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
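Editor's note: the deleted schema above ends by seeding the VERSION table with '0.12.0'. As a minimal, purely illustrative sketch of reading that bookkeeping row back, assuming the cx_Oracle driver and placeholder credentials/DSN (none of which appear in this commit):

import cx_Oracle

# Placeholder connection details; the real metastore DSN and credentials are site-specific.
conn = cx_Oracle.connect("hive", "hive_password", "metastore-db.example.com:1521/XE")
cur = conn.cursor()
cur.execute("SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1")
print(cur.fetchone())  # expected to print ('0.12.0', 'Hive release version 0.12.0')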


[13/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
new file mode 100755
index 0000000..478c240
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
@@ -0,0 +1,141 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+
+
+def post_upgrade_deregister():
+  """
+  Runs the "hive --service hiveserver2 --deregister <version>" command to
+  de-provision the server in preparation for an upgrade. This will contact
+  ZooKeeper to remove the server so that clients that attempt to connect
+  will be directed to other servers automatically. Once all
+clients have drained, the server will shut down automatically; this process
+  could take a very long time.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
+
+  if params.security_enabled:
+    kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    Execute(kinit_command,user=params.smokeuser)
+
+  # calculate the current hive server version
+  current_hiveserver_version = _get_current_hiveserver_version()
+  if current_hiveserver_version is None:
+    raise Fail('Unable to determine the current HiveServer2 version to deregister.')
+
+  # fallback when upgrading because <stack-root>/current/hive-server2/conf/conf.server may not exist
+  hive_server_conf_dir = params.hive_server_conf_dir
+  if not os.path.exists(hive_server_conf_dir):
+    hive_server_conf_dir = "/etc/hive/conf.server"
+
+  # deregister
+  hive_execute_path = params.execute_path
+  # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
+  # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
+  # By now <stack-selector-tool> has been called to set 'current' to target-stack
+  if "downgrade" == params.upgrade_direction:
+    # hive_bin
+    downgrade_version = params.current_version
+    if params.downgrade_from_version:
+      downgrade_version = params.downgrade_from_version
+    hive_execute_path = _get_hive_execute_path(downgrade_version)
+
+  command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
+  Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
+
+
+def _get_hive_execute_path(stack_version_formatted):
+  """
+  Returns the exact execute path to use for the given stack-version.
+  This method does not return the "current" path.
+  :param stack_version_formatted: Exact stack-version to use in the new path
+  :return: Hive execute path for the exact stack-version
+  """
+  import params
+
+  hive_execute_path = params.execute_path
+  formatted_stack_version = format_stack_version(stack_version_formatted)
+  if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
+    # hive_bin
+    new_hive_bin = format('{stack_root}/{stack_version_formatted}/hive/bin')
+    if (os.pathsep + params.hive_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
+    # hadoop_bin_dir
+    new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version_formatted)
+    old_hadoop_bin = params.hadoop_bin_dir
+    if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
+  return hive_execute_path
+
+
+def _get_current_hiveserver_version():
+  """
+  Runs "hive --version" and parses the result in order
+  to obtain the current version of hive.
+
+  :return:  the hiveserver2 version, returned by "hive --version"
+  """
+  import params
+
+  try:
+    # When downgrading, the source version should be the version we are downgrading from
+    if "downgrade" == params.upgrade_direction:
+      if not params.downgrade_from_version:
+        raise Fail('The version we are downgrading from should be provided in \'downgrade_from_version\'')
+      source_version = params.downgrade_from_version
+    else:
+      source_version = params.current_version
+    hive_execute_path = _get_hive_execute_path(source_version)
+    version_hive_bin = params.hive_bin
+    formatted_source_version = format_stack_version(source_version)
+    if formatted_source_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_source_version):
+      version_hive_bin = format('{stack_root}/{source_version}/hive/bin')
+    command = format('{version_hive_bin}/hive --version')
+    return_code, output = shell.call(command, user=params.hive_user, path=hive_execute_path)
+  except Exception, e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
+
+  if return_code != 0:
+    raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  match = re.search('^(Hive) ([0-9]+\.[0-9]+\.\S+)', output, re.MULTILINE)
+
+  if match:
+    current_hive_server_version = match.group(2)
+    return current_hive_server_version
+  else:
+    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(output))
+
+
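Editor's note: _get_hive_execute_path above swaps the stack-versioned hive and hadoop bin directories inside a PATH-style string. A standalone sketch of that substitution with hypothetical directory names (the real values come from params and stack_select):

import os

execute_path = os.pathsep.join(["/usr/bin", "/usr/odpi/1.0.0/hive/bin", "/usr/odpi/1.0.0/hadoop/bin"])
old_hive_bin = "/usr/odpi/1.0.0/hive/bin"   # hypothetical current-version bin dir
new_hive_bin = "/usr/odpi/1.1.0/hive/bin"   # hypothetical target-version bin dir
if (os.pathsep + old_hive_bin) in execute_path:
    execute_path = execute_path.replace(os.pathsep + old_hive_bin, os.pathsep + new_hive_bin)
print(execute_path)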

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
new file mode 100755
index 0000000..22b4061
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import time
+
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.core.resources.service import Service
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.hive_check import check_thrift_port_sasl
+from resource_management.libraries.functions import get_user_call_output
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_service(name, action='start', upgrade_type=None):
+  import params
+  if name == 'metastore':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_metastore_win_service_name, action=action)
+
+  if name == 'hiveserver2':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_server_win_service_name, action=action)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_service(name, action='start', upgrade_type=None):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format("{start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format("{start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+
+
+    if params.security_enabled and params.current_version and check_stack_feature(StackFeature.HIVE_SERVER2_KERBERIZED_ENV, params.current_version):
+      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; ")
+      Execute(hive_kinit_cmd, user=params.hive_user)
+
+  pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=params.hive_user, is_checked_call=False)[1]
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
+
+  if action == 'start':
+    if name == 'hiveserver2':
+      check_fs_root(params.hive_server_conf_dir, params.execute_path)
+
+    daemon_cmd = cmd
+    hadoop_home = params.hadoop_home
+    hive_bin = "hive"
+
+    # upgrading hiveserver2 (rolling_restart) means that there is an existing,
+    # de-registering hiveserver2; the pid will still exist, but the new
+    # hiveserver is spinning up on a new port, so the pid will be re-written
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      process_id_exists_command = None
+
+      if params.version and params.stack_root:
+        hadoop_home = format("{stack_root}/{version}/hadoop")
+        hive_bin = os.path.join(params.hive_bin, hive_bin)
+      
+    Execute(daemon_cmd, 
+      user = params.hive_user,
+      environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_bin },
+      path = params.execute_path,
+      not_if = process_id_exists_command)
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
+       params.hive_jdbc_driver == "org.postgresql.Driver" or \
+       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+
+      validation_called = False
+
+      if params.hive_jdbc_target is not None:
+        validation_called = True
+        validate_connection(params.hive_jdbc_target, params.hive_lib)
+      if params.hive2_jdbc_target is not None:
+        validation_called = True
+        validate_connection(params.hive2_jdbc_target, params.hive_server2_hive2_lib)
+
+      if not validation_called:
+        emessage = "ERROR! DB connection check should be executed at least once!"
+        Logger.error(emessage)
+
+  elif action == 'stop':
+
+    daemon_kill_cmd = format("{sudo} kill {pid}")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid}")
+
+    Execute(daemon_kill_cmd,
+      not_if = format("! ({process_id_exists_command})")
+    )
+
+    wait_time = 5
+    Execute(daemon_hard_kill_cmd,
+      not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
+      ignore_failures = True
+    )
+
+    try:
+      # check if stopped the process, else fail the task
+      Execute(format("! ({process_id_exists_command})"),
+        tries=20,
+        try_sleep=3,
+      )
+    except:
+      show_logs(params.hive_log_dir, params.hive_user)
+      raise
+
+    File(pid_file,
+         action = "delete"
+    )
+
+def validate_connection(target_path_to_jdbc, hive_lib_path):
+  import params
+
+  path_to_jdbc = target_path_to_jdbc
+  if not params.jdbc_jar_name:
+    path_to_jdbc = format("{hive_lib_path}/") + \
+                   params.default_connectors_map[params.hive_jdbc_driver] if params.hive_jdbc_driver in params.default_connectors_map else None
+    if not os.path.isfile(path_to_jdbc):
+      path_to_jdbc = format("{hive_lib_path}/") + "*"
+      error_message = "Error! Could not find a JDBC driver with the default name " + params.default_connectors_map[params.hive_jdbc_driver] + \
+                      " in the Hive lib dir, so the DB connection check may fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the server host."
+      Logger.error(error_message)
+
+  db_connection_check_command = format(
+    "{java64_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
+
+  try:
+    Execute(db_connection_check_command,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
+  except:
+    show_logs(params.hive_log_dir, params.hive_user)
+    raise
+
+
+def check_fs_root(conf_dir, execution_path):
+  import params
+
+  if not params.fs_root.startswith("hdfs://"):
+    Logger.info("Skipping fs root check as fs_root does not start with hdfs://")
+    return
+
+  metatool_cmd = format("hive --config {conf_dir} --service metatool")
+  cmd = as_user(format("{metatool_cmd} -listFSRoot", env={'PATH': execution_path}), params.hive_user) \
+        + format(" 2>/dev/null | grep hdfs:// | cut -f1,2,3 -d '/' | grep -v '{fs_root}' | head -1")
+  code, out = shell.call(cmd)
+
+  if code == 0 and out.strip() != "" and params.fs_root.strip() != out.strip():
+    out = out.strip()
+    cmd = format("{metatool_cmd} -updateLocation {fs_root} {out}")
+    Execute(cmd,
+            user=params.hive_user,
+            environment={'PATH': execution_path}
+    )
+
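Editor's note: hive_service builds its liveness test as a shell one-liner ("ls {pid_file} && ps -p {pid}") and hands it to Execute as a not_if guard. The same check, sketched outside Ambari with subprocess and a hypothetical pid-file path:

import subprocess

pid_file = "/var/run/hive/hive-server.pid"  # hypothetical location
check = "ls {0} >/dev/null 2>&1 && ps -p $(cat {0}) >/dev/null 2>&1".format(pid_file)
alive = subprocess.call(["bash", "-c", check]) == 0
print("HiveServer2 pid looks alive" if alive else "no running HiveServer2 pid found")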

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
new file mode 100755
index 0000000..eaf95ad
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+# Python Imports
+
+# Ambari Commons & Resource Management imports
+import os
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.libraries.functions import get_user_call_output
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+# Local Imports
+from hive_service import check_fs_root
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_service_interactive(name, action='start', upgrade_type=None):
+  pass
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_service_interactive(name, action='start', upgrade_type=None):
+  import params
+
+  pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
+  cmd = format("{start_hiveserver2_interactive_path} {hive_pid_dir}/hive-server2-interactive.out {hive_log_dir}/hive-server2-interactive.err {pid_file} {hive_server_interactive_conf_dir} {hive_log_dir}")
+
+  pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=params.hive_user, is_checked_call=False)[1]
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
+
+  if action == 'start':
+    check_fs_root(params.hive_server_interactive_conf_dir, params.execute_path_hive_interactive)
+    daemon_cmd = cmd
+    hadoop_home = params.hadoop_home
+    hive_interactive_bin = "hive2"
+
+    Execute(daemon_cmd,
+            user = params.hive_user,
+            environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_interactive_bin },
+            path = params.execute_path,
+            not_if = process_id_exists_command)
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
+        params.hive_jdbc_driver == "org.postgresql.Driver" or \
+        params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+
+      path_to_jdbc = params.target_hive_interactive
+      if not params.jdbc_jar_name:
+        path_to_jdbc = format("{hive_interactive_lib}/") + \
+                       params.default_connectors_map[params.hive_jdbc_driver] if params.hive_jdbc_driver in params.default_connectors_map else None
+        if not os.path.isfile(path_to_jdbc):
+          path_to_jdbc = format("{hive_interactive_lib}/") + "*"
+          error_message = "Error! Could not find a JDBC driver with the default name " + params.default_connectors_map[params.hive_jdbc_driver] + \
+                " in the Hive lib dir, so the DB connection check may fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the server host."
+          Logger.error(error_message)
+
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
+  elif action == 'stop':
+
+    daemon_kill_cmd = format("{sudo} kill {pid}")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid}")
+
+    Execute(daemon_kill_cmd,
+            not_if = format("! ({process_id_exists_command})")
+            )
+
+    # check if stopped the process, otherwise send hard kill command.
+    try:
+      Execute(format("! ({process_id_exists_command})"),
+              tries=10,
+              try_sleep=3,
+              )
+    except:
+      Execute(daemon_hard_kill_cmd,
+              not_if = format("! ({process_id_exists_command}) ")
+              )
+
+    # check if stopped the process, else fail the task
+    Execute(format("! ({process_id_exists_command})"),
+            tries=20,
+            try_sleep=3,
+            )
+
+    File(pid_file,
+         action = "delete"
+         )
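Editor's note: the stop branch above issues a plain kill, re-checks the process, and only escalates to kill -9 if it is still there. A rough in-process equivalent using os.kill with a hypothetical pid (OSError doubles as the "process is gone" signal, so this stays Python 2 compatible):

import os
import signal
import time

def stop_process(pid, retries=10, sleep_seconds=3):
    try:
        os.kill(pid, signal.SIGTERM)        # counterpart of "{sudo} kill {pid}"
    except OSError:
        return True                         # already gone
    for _ in range(retries):
        time.sleep(sleep_seconds)
        try:
            os.kill(pid, 0)                 # probe only; raises once the pid disappears
        except OSError:
            return True
    os.kill(pid, signal.SIGKILL)            # counterpart of "{sudo} kill -9 {pid}"
    return False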

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
new file mode 100755
index 0000000..851dc02
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import mysql_users
+from resource_management import *
+
+from mysql_service import mysql_service
+from mysql_utils import mysql_configure
+
+
+class MysqlServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def clean(self, env):
+    import params
+    env.set_params(params)
+    mysql_users.mysql_deluser()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mysql_configure()
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='start')
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    mysql_service(daemon_name=status_params.daemon_name, action='status')
+
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
new file mode 100755
index 0000000..8b98ed1
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'): 
+  status_cmd = format("pgrep -l '^{process_name}$'")
+  cmd = ('service', daemon_name, action)
+
+  if action == 'status':
+    try:
+      Execute(status_cmd)
+    except Fail:
+      raise ComponentIsNotRunning()
+  elif action == 'stop':
+    import params
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd,
+            sudo = True,
+    )
+  elif action == 'start':
+    import params   
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd,
+      sudo = True,
+    )
+
+
+
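Editor's note: mysql_service treats a failing pgrep as "component not running". The same probe in plain subprocess form, with a hypothetical process name:

import subprocess

def mysql_is_running(process_name="mysqld"):
    # pgrep exits non-zero when nothing matches the anchored name pattern
    return subprocess.call(["pgrep", "-l", "^" + process_name + "$"]) == 0

print(mysql_is_running())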

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
new file mode 100755
index 0000000..c023548
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# Used to add hive access to the needed components
+def mysql_adduser():
+  import params
+  
+  File(params.mysql_adduser_path,
+       mode=0755,
+       content=StaticFile('addMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
+  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(add_hiveserver_cmd)
+  else:
+    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          logoutput=False,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+
+# Removes hive access from components
+def mysql_deluser():
+  import params
+  
+  File(params.mysql_deluser_path,
+       mode=0755,
+       content=StaticFile('removeMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
+  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(del_hiveserver_cmd)
+  else:
+    cmd = format(
+      del_hiveserver_cmd + ";" + del_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+  )
+
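Editor's note: both helpers above collapse to a single invocation when HiveServer2 and the metastore share a host, and chain two invocations otherwise. A tiny sketch of that branch with hypothetical host names and a placeholder script path:

adduser_script = "/var/lib/ambari-agent/addMysqlUser.sh"   # placeholder path
hive_server_host = "node1.example.com"
hive_metastore_host = "node2.example.com"

add_hiveserver_cmd = "bash -x %s mysqld hive '***' %s" % (adduser_script, hive_server_host)
add_metastore_cmd = "bash -x %s mysqld hive '***' %s" % (adduser_script, hive_metastore_host)
cmd = add_hiveserver_cmd if hive_server_host == hive_metastore_host else add_hiveserver_cmd + ";" + add_metastore_cmd
print(cmd)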

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
new file mode 100755
index 0000000..5006b56
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import mysql_users
+
+def mysql_configure():
+  import params
+
+  # required for running hive
+  replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
+  Execute(replace_bind_address,
+          sudo = True,
+  )
+  
+  # this also will start mysql-server
+  mysql_users.mysql_adduser()
+  
\ No newline at end of file
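Editor's note: mysql_configure rewrites the bind-address line in the MySQL config with sed before creating the Hive users. The same substitution applied in-process to a sample line (the config file path itself is site-specific):

import re

line = "bind-address\t= 127.0.0.1"
print(re.sub(r"^bind-address[ \t]*=.*", "bind-address = 0.0.0.0", line))
# -> bind-address = 0.0.0.0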

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
new file mode 100755
index 0000000..f10a3f3
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
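Editor's note: params.py pulls host_sys_prepped and command_retry_enabled out of the command JSON with default(path, fallback). A rough stand-in for that lookup over a nested dict, written only to illustrate the path semantics (the real helper is resource_management's default):

def lookup(config, path, fallback):
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

cfg = {"hostLevelParams": {"host_sys_prepped": True}}
print(lookup(cfg, "/hostLevelParams/host_sys_prepped", False))       # True
print(lookup(cfg, "/commandParams/command_retry_enabled", False))    # False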

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
new file mode 100755
index 0000000..1cd58c3
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
@@ -0,0 +1,736 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import status_params
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import os
+
+from urlparse import urlparse
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.copy_tarball import STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries import functions
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+
+# Default log4j version; put config files under /etc/hive/conf
+log4j_version = '1'
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_root = status_params.stack_root
+stack_name = status_params.stack_name
+stack_name_uppercase = stack_name.upper()
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+# node hostname
+hostname = config["hostname"]
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted_major = status_params.stack_version_formatted_major
+
+# this is not available on INSTALL action because <stack-selector-tool> is not available
+stack_version_formatted = functions.get_stack_version('hive-server2')
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# When downgrading, 'version' and 'current_version' both point to the downgrade-target version;
+# downgrade_from_version provides the source version the downgrade is happening from.
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+# Upgrade direction
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# component ROLE directory (like hive-metastore or hive-server2-hive2)
+component_directory = status_params.component_directory
+component_directory_interactive = status_params.component_directory_interactive
+
+hadoop_home = format('{stack_root}/current/hadoop-client')
+hive_bin = format('{stack_root}/current/{component_directory}/bin')
+hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
+hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
+hive_lib = format('{stack_root}/current/{component_directory}/lib')
+hive_version_lib = format('{stack_root}/{version}/hive/lib')
+hive_var_lib = '/var/lib/hive'
+hive_user_home_dir = "/home/hive"
+
+# starting with stacks where HSI is supported, we need to use the 'hive2' schematool
+hive_server2_hive2_dir = None
+hive_server2_hive2_lib = None
+
+version = default("/commandParams/version", None)
+
+if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, version_for_stack_feature_checks):
+  # the name of the hiveserver2-hive2 component
+  hive_server2_hive2_component = status_params.SERVER_ROLE_DIRECTORY_MAP["HIVE_SERVER_INTERACTIVE"]
+
+  # when using the version, we can just specify the component as "hive2"
+  hive_schematool_ver_bin = format('{stack_root}/{version}/hive2/bin')
+
+  # use the schematool which ships with hive2
+  hive_schematool_bin = format('{stack_root}/current/{hive_server2_hive2_component}/bin')
+
+  # <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
+  hive_server2_hive2_dir = format('{stack_root}/current/{hive_server2_hive2_component}')
+
+  # <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
+  hive_server2_hive2_version_dir = format('{stack_root}/{version}/hive2')
+
+  # <stack-root>/current/hive-server2-hive2/lib -> <stack-root>/<version>/hive2/lib
+  hive_server2_hive2_lib = format('{hive_server2_hive2_dir}/lib')
+
+  # <stack-root>/<version>/hive2/lib
+  hive_server2_hive2_version_lib = format('{hive_server2_hive2_version_dir}/lib')
+
+
+hive_interactive_bin = format('{stack_root}/current/{component_directory_interactive}/bin')
+hive_interactive_lib = format('{stack_root}/current/{component_directory_interactive}/lib')
+
+# Hive Interactive related paths
+hive_interactive_var_lib = '/var/lib/hive2'
+
+# These tar folders were used in previous stack versions, e.g., HDP 2.1
+hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
+hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')
+sqoop_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/sqoop*.tar.gz')
+
+hive_metastore_site_supported = False
+hive_etc_dir_prefix = "/etc/hive"
+hive_interactive_etc_dir_prefix = "/etc/hive2"
+limits_conf_dir = "/etc/security/limits.d"
+
+hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
+hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
+
+# use the directories from status_params as they are already calculated for
+# the correct stack version
+hadoop_conf_dir = status_params.hadoop_conf_dir
+hadoop_bin_dir = status_params.hadoop_bin_dir
+webhcat_conf_dir = status_params.webhcat_conf_dir
+hive_conf_dir = status_params.hive_conf_dir
+hive_home_dir = status_params.hive_home_dir
+hive_config_dir = status_params.hive_config_dir
+hive_client_conf_dir = status_params.hive_client_conf_dir
+hive_server_conf_dir = status_params.hive_server_conf_dir
+
+hcat_conf_dir = '/etc/hive-hcatalog/conf'
+config_dir = '/etc/hive-webhcat/conf'
+hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+# there are no client versions of these, use server versions directly
+hcat_lib = format('{stack_root}/current/hive-webhcat/share/hcatalog')
+webhcat_bin_dir = format('{stack_root}/current/hive-webhcat/sbin')
+
+# --- Tarballs ---
+# DON'T CHANGE THESE VARIABLE NAMES
+# Values don't change from those in copy_tarball.py
+webhcat_apps_dir = "/apps/webhcat"
+hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
+pig_tar_source = "{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
+hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
+pig_tar_dest_file = "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
+
+hadoop_streaming_tar_source = "{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
+sqoop_tar_source = "{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
+hadoop_streaming_tar_dest_dir = "/{0}/apps/{1}/mapreduce/".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
+sqoop_tar_dest_dir = "/{0}/apps/{1}/sqoop/".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
+
+tarballs_mode = 0444
+
+purge_tables = "false"
+# Starting with the stack version that supports the hive_purge_table feature, DROP should be executed with PURGE
+if check_stack_feature(StackFeature.HIVE_PURGE_TABLE, version_for_stack_feature_checks):
+  purge_tables = 'true'
+
+if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, version_for_stack_feature_checks):
+  # this is NOT a typo.  Configs for hcatalog/webhcat point to a
+  # specific directory which is NOT called 'conf'
+  hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
+  config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
+
+if check_stack_feature(StackFeature.HIVE_METASTORE_SITE_SUPPORT, version_for_stack_feature_checks):
+  hive_metastore_site_supported = True
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+
+#HACK Temporarily use dbType=azuredb while invoking schematool
+if hive_metastore_db_type == "mssql":
+  hive_metastore_db_type = "azuredb"
+
+#users
+hive_user = config['configurations']['hive-env']['hive_user']
+
+#JDBC driver jar name
+hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
+hive_database = config['configurations']['hive-env']['hive_database']
+hive_use_existing_db = hive_database.startswith('Existing')
+
+default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
+                           "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
+                           "org.postgresql.Driver":"postgresql-jdbc.jar",
+                           "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
+                           "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
+
+# NOTE: keying the connector jar off the JDBC driver class name may not be ideal; the database
+# type would probably be a safer discriminator, since driver class paths can change.
+sqla_db_used = False
+hive_previous_jdbc_jar_name = None
+if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+  jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+elif hive_jdbc_driver == "org.postgresql.Driver":
+  jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
+  jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+  sqla_db_used = True
+
+default_mysql_jar_name = "mysql-connector-java.jar"
+default_mysql_target = format("{hive_lib}/{default_mysql_jar_name}")
+hive_previous_jdbc_jar = format("{hive_lib}/{hive_previous_jdbc_jar_name}")
+if not hive_use_existing_db:
+  jdbc_jar_name = default_mysql_jar_name
+
+
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+
+hive_jdbc_target = format("{hive_lib}/{jdbc_jar_name}")
+hive2_jdbc_target = None
+if hive_server2_hive2_dir:
+  hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")
+
+# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
+if upgrade_direction:
+  hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
+  hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None
+
+
+hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar;
+# but during a Rolling Upgrade, if <stack-selector-tool> has been called and the restart then fails,
+# the 'current' pointer already points at the upgraded version location, which breaks the cp command
+source_jdbc_file = format("{stack_root}/{current_version}/hive/lib/{jdbc_jar_name}")
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
+                          "org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
+
+prepackaged_jdbc_name = "ojdbc6.jar"
+prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
+templeton_port = config['configurations']['webhcat-site']['templeton.port']
+
+#constants for type2 jdbc
+jdbc_libs_dir = format("{hive_lib}/native/lib64")
+lib_dir_available = os.path.exists(jdbc_libs_dir)
+
+if sqla_db_used:
+  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
+  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+  libs_in_hive_lib = format("{jdbc_libs_dir}/*")
+
+
+# Start, Common Hosts and Ports
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+hive_metastore_hosts = default('/clusterHostInfo/hive_metastore_host', [])
+hive_metastore_host = hive_metastore_hosts[0] if len(hive_metastore_hosts) > 0 else None
+hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris'])
+
+hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
+hive_server_host = hive_server_hosts[0] if len(hive_server_hosts) > 0 else None
+
+hive_server_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
+hive_server_interactive_host = hive_server_interactive_hosts[0] if len(hive_server_interactive_hosts) > 0 else None
+# End, Common Hosts and Ports
+
+hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+
+if hive_transport_mode.lower() == "http":
+  hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
+else:
+  hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+
+hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
+hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
+
+# ssl options
+hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
+hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
+hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
+smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+hive_metastore_principal = config['configurations']['hive-site']['hive.metastore.kerberos.principal']
+
+hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+
+#hive_env
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+hive_interactive_pid = status_params.hive_interactive_pid
+
+#Default conf dir for client
+hive_conf_dirs_list = [hive_client_conf_dir]
+
+# These are the folders to which the configs will be written.
+ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER']
+if status_params.role == "HIVE_METASTORE" and hive_metastore_hosts is not None and hostname in hive_metastore_hosts:
+  hive_conf_dirs_list.append(hive_server_conf_dir)
+elif status_params.role == "HIVE_SERVER" and hive_server_hosts is not None and hostname in hive_server_host:
+  hive_conf_dirs_list.append(hive_server_conf_dir)
+elif status_params.role == "HIVE_SERVER_INTERACTIVE" and hive_server_interactive_hosts is not None and hostname in hive_server_interactive_hosts:
+  hive_conf_dirs_list.append(status_params.hive_server_interactive_conf_dir)
+  ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER_INTERACTIVE']
+# log4j version is 2 for hive2; put config files under /etc/hive2/conf
+if status_params.role == "HIVE_SERVER_INTERACTIVE":
+  log4j_version = '2'
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh.j2'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+
+# Hive Server Interactive
+slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+user_group = config['configurations']['cluster-env']['user_group']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+# Need this for yarn.nodemanager.recovery.dir in yarn-site
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+
+target_hive_interactive = format("{hive_interactive_lib}/{jdbc_jar_name}")
+hive_intaractive_previous_jdbc_jar = format("{hive_interactive_lib}/{hive_previous_jdbc_jar_name}")
+jars_in_hive_lib = format("{hive_lib}/*.jar")
+
+start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
+start_metastore_path = format("{tmp_dir}/start_metastore_script")
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+
+if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+  if check_stack_feature(StackFeature.HIVE_ENV_HEAPSIZE, version_for_stack_feature_checks):
+    hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
+  else:
+    hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
+else:
+  hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
+
+hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
+
+java64_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+##### MYSQL
+db_name = config['configurations']['hive-env']['hive_database_name']
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
+mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
+
+#### Metastore
+# initialize the schema only if not in an upgrade/downgrade
+init_metastore_schema = upgrade_direction is None
+
+########## HCAT
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
+
+#hive-log4j.properties.template
+if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
+  log4j_props = config['configurations']['hive-log4j']['content']
+else:
+  log4j_props = None
+
+#webhcat-log4j.properties.template
+if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
+  log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
+else:
+  log4j_webhcat_props = None
+
+#hive-exec-log4j.properties.template
+if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
+else:
+  log4j_exec_props = None
+
+daemon_name = status_params.daemon_name
+process_name = status_params.process_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
+
+hive_hdfs_user_dir = format("/user/{hive_user}")
+hive_hdfs_user_mode = 0755
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
+whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
+hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+
+# Tez-related properties
+tez_user = config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+
+# Tez libraries
+tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
+
+if OSCheck.is_ubuntu_family():
+  mysql_configname = '/etc/mysql/my.cnf'
+else:
+  mysql_configname = '/etc/my.cnf'
+
+mysql_user = 'mysql'
+
+# Hive security
+hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
+
+mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+
+hive_site_config = dict(config['configurations']['hive-site'])
+
+########################################################
+############# AMS related params #####################
+########################################################
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = len(ams_collector_hosts) > 0
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks
+hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
+
+if has_atlas_in_cluster():
+  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion
+
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+# create a partial function with the common arguments pre-bound for every HdfsResource call;
+# to create an HDFS directory, service code calls params.HdfsResource
+HdfsResource = functools.partial(
+ HdfsResource,
+  user = hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+ )
+
+# Hive Interactive related
+hive_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
+has_hive_interactive = len(hive_interactive_hosts) > 0
+if has_hive_interactive:
+  llap_daemon_log4j = config['configurations']['llap-daemon-log4j']['content']
+  llap_cli_log4j2 = config['configurations']['llap-cli-log4j2']['content']
+  hive_log4j2 = config['configurations']['hive-log4j2']['content']
+  hive_exec_log4j2 = config['configurations']['hive-exec-log4j2']['content']
+  beeline_log4j2 = config['configurations']['beeline-log4j2']['content']
+
+  hive_server_interactive_conf_dir = status_params.hive_server_interactive_conf_dir
+  execute_path_hive_interactive = os.environ['PATH'] + os.pathsep + hive_interactive_bin + os.pathsep + hadoop_bin_dir
+  start_hiveserver2_interactive_script = 'startHiveserver2Interactive.sh.j2'
+  start_hiveserver2_interactive_path = format("{tmp_dir}/start_hiveserver2_interactive_script")
+  hive_interactive_env_sh_template = config['configurations']['hive-interactive-env']['content']
+  hive_interactive_enabled = default('/configurations/hive-interactive-env/enable_hive_interactive', False)
+  llap_app_java_opts = default('/configurations/hive-interactive-env/llap_java_opts', '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}')
+
+  # Service check related
+  if hive_transport_mode.lower() == "http":
+    hive_server_interactive_port = config['configurations']['hive-interactive-site']['hive.server2.thrift.http.port']
+  else:
+    hive_server_interactive_port = default('/configurations/hive-interactive-site/hive.server2.thrift.port',"10500")
+  # Tez for Hive interactive related
+  tez_interactive_config_dir = "/etc/tez_hive2/conf"
+  tez_interactive_user = config['configurations']['tez-env']['tez_user']
+  num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
+  # Used in LLAP slider package creation
+  num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
+  llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
+  llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
+  hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
+  llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
+  llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
+  hive_llap_principal = None
+  if security_enabled:
+    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
+    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
+  pass
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = len(ranger_admin_hosts) > 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+
+#ranger hive properties
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
+  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hive'
+
+jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
+common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
+
+repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+ranger_env = config['configurations']['ranger-env']
+ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
+policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
+
+if security_enabled:
+  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
+  hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+
+#For curl command in ranger plugin to get db connector
+if has_ranger_admin:
+  enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
+  repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  ranger_previous_jdbc_jar_name = None
+
+  if stack_supports_ranger_audit_db:
+    if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
+      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "com.mysql.jdbc.Driver"
+    elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
+      ranger_jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      colon_count = xa_db_host.count(':')
+      if colon_count == 2 or colon_count == 0:
+        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+      else:
+        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+      jdbc_driver = "oracle.jdbc.OracleDriver"
+    elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
+      ranger_jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "org.postgresql.Driver"
+    elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
+      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+    elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
+      ranger_jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  sql_connector_jar = ''
+
+  hive_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'jdbc.driverClassName': jdbc_driver_class_name,
+    'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  hive_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hive_ranger_plugin_config),
+    'description': 'hive repo',
+    'name': repo_name,
+    'repositoryType': 'hive',
+    'assetType': '3'
+  }
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    hive_ranger_plugin_config['policy.download.auth.users'] = hive_user
+    hive_ranger_plugin_config['tag.download.auth.users'] = hive_user
+    hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = hive_user
+
+  if stack_supports_ranger_kerberos:
+    hive_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hive_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hive_ranger_plugin_config,
+      'description': 'hive repo',
+      'name': repo_name,
+      'type': 'hive'
+    }
+
+
+  xa_audit_db_is_enabled = False
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+  ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+  ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+  # For SQL Anywhere (SQLA), explicitly disable audit to DB for Ranger
+  if xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
+

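One idiom in params_linux.py above is worth a standalone illustration: the functools.partial block at the end pre-binds the cluster-wide keyword arguments (user, keytab, Hadoop dirs, default FS, and so on) so that downstream service scripts only supply the per-path details when they call params.HdfsResource. The sketch below reproduces the idiom with a stand-in class; FakeHdfsResource and its keyword arguments are invented for the example and are not the real resource_management API.

import functools

class FakeHdfsResource(object):
    # Stand-in for resource_management's HdfsResource, used only to demonstrate the partial idiom.
    def __init__(self, path, user=None, hadoop_conf_dir=None, security_enabled=False, **kwargs):
        self.path = path
        self.user = user
        self.hadoop_conf_dir = hadoop_conf_dir
        self.security_enabled = security_enabled
        self.extra = kwargs

# Pre-bind the arguments that are identical for every call, as params_linux.py does above.
HdfsResource = functools.partial(
    FakeHdfsResource,
    user="hdfs",
    hadoop_conf_dir="/etc/hadoop/conf",
    security_enabled=False,
)

# A caller then only names the per-directory details (these keyword names are illustrative).
warehouse = HdfsResource("/apps/hive/warehouse", type="directory", owner="hive")
print("%s requested as a %s owned by %s, run as %s" % (
    warehouse.path, warehouse.extra.get("type"), warehouse.extra.get("owner"), warehouse.user))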
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
new file mode 100755
index 0000000..880fdb5
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+stack_root = None
+hive_conf_dir = None
+hive_home = None
+hive_lib_dir = None
+hive_log_dir = None
+hive_opts = None
+hcat_home = None
+hcat_config_dir = None
+hive_bin = None
+
+try:
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  hive_conf_dir = os.environ["HIVE_CONF_DIR"]
+  hive_home = os.environ["HIVE_HOME"]
+  hive_lib_dir = os.environ["HIVE_LIB_DIR"]
+  hive_log_dir = os.environ["HIVE_LOG_DIR"]
+  hive_opts = os.environ["HIVE_OPTS"]
+  hcat_home = os.environ["HCAT_HOME"]
+  hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
+  hive_bin = os.path.join(hive_home, "bin")
+except:
+  pass
+
+hive_env_sh_template = config['configurations']['hive-env']['content']
+hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hive_user = hadoop_user
+hcat_user = hadoop_user
+
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+hive_execution_engine = config["configurations"]["hive-site"]["hive.execution.engine"]
+
+######## Metastore Schema
+init_metastore_schema = not config['configurations']['hive-site']['datanucleus.autoCreateSchema']
+
+service_map = {
+  "metastore" : hive_metastore_win_service_name,
+  "client" : hive_client_win_service_name,
+  "hiveserver2" : hive_server_win_service_name,
+  "templeton" : webhcat_server_win_service_name
+}

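params_windows.py above reads its install locations from environment variables inside one bare try/except, so a single missing variable leaves every later value at None. Shown below is a sketch of the same fallback expressed with os.environ.get; the variable names mirror the ones above, but this is an illustrative alternative, not the project's code.

import os

# Each lookup falls back to None independently, so one missing variable
# does not silently abandon the remaining assignments.
hive_conf_dir   = os.environ.get("HIVE_CONF_DIR")
hive_home       = os.environ.get("HIVE_HOME")
hive_lib_dir    = os.environ.get("HIVE_LIB_DIR")
hive_log_dir    = os.environ.get("HIVE_LOG_DIR")
hive_opts       = os.environ.get("HIVE_OPTS")
hcat_home       = os.environ.get("HCAT_HOME")
hcat_config_dir = os.environ.get("WEBHCAT_CONF_DIR")
hive_bin   = os.path.join(hive_home, "bin") if hive_home else None
stack_root = (os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
              if "HADOOP_HOME" in os.environ else None)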
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
new file mode 100755
index 0000000..1836d0f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import socket
+import sys
+import time
+import subprocess
+
+from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import get_unique_id_and_date
+
+class HiveServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveServiceCheckWindows(HiveServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+    service = "HIVE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
+
+    hcat_service_check()
+    webhcat_service_check()
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServiceCheckDefault(HiveServiceCheck):
+
+  def __init__(self):
+    super(HiveServiceCheckDefault, self).__init__()
+    Logger.initialize_logger()
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    else:
+      kinit_cmd = ""
+
+    # Check HiveServer
+    Logger.info("Running Hive Server checks")
+    Logger.info("--------------------------\n")
+    self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts,
+                           int(format("{hive_server_port}")))
+
+
+    if params.has_hive_interactive and params.hive_interactive_enabled:
+      Logger.info("Running Hive Server2 checks")
+      Logger.info("--------------------------\n")
+
+      self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
+                             int(format("{hive_server_interactive_port}")))
+
+      Logger.info("Running LLAP checks")
+      Logger.info("-------------------\n")
+      self.check_llap(env, kinit_cmd, params.hive_interactive_hosts, int(format("{hive_server_interactive_port}")),
+                      params.hive_llap_principal, params.hive_server2_authentication, params.hive_transport_mode,
+                      params.hive_http_endpoint)
+
+
+    Logger.info("Running HCAT checks")
+    Logger.info("-------------------\n")
+    hcat_service_check()
+
+    Logger.info("Running WEBHCAT checks")
+    Logger.info("---------------------\n")
+    webhcat_service_check()
+
+  def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
+    import params
+    env.set_params(params)
+    Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))
+
+    if not address_list:
+      raise Fail("Cannot find any " + server_component_name + " host. Please check the configuration.")
+
+    SOCKET_WAIT_SECONDS = 290
+
+    start_time = time.time()
+    end_time = start_time + SOCKET_WAIT_SECONDS
+
+    Logger.info("Waiting for the {0} to start...".format(server_component_name))
+
+    workable_server_available = False
+    i = 0
+    while time.time() < end_time and not workable_server_available:
+      address = address_list[i]
+      try:
+        check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
+                               params.hive_server_principal, kinit_cmd, params.smokeuser,
+                               transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
+                               ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
+                               ssl_password=params.hive_ssl_keystore_password)
+        Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
+        workable_server_available = True
+      except:
+        Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
+        time.sleep(5)
+
+      i += 1
+      if i == len(address_list):
+        i = 0
+
+    elapsed_time = time.time() - start_time
+
+    if not workable_server_available:
+      raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
+                 .format(server_component_name, params.hostname, server_port, elapsed_time))
+
+    Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
+                .format(server_component_name, params.hostname, server_port, elapsed_time))
+
+  """
+  Performs Service check for LLAP app
+  """
+  def check_llap(self, env, kinit_cmd, address, port, key, hive_auth="NOSASL", transport_mode="binary", http_endpoint="cliservice"):
+    import params
+    env.set_params(params)
+
+    unique_id = get_unique_id_and_date()
+
+    beeline_url = ['jdbc:hive2://{address}:{port}/', "transportMode={transport_mode}"]
+
+    # Currently, HSI is supported on a single node only; the address list should have exactly
+    # one entry, so we pick the first one.
+    address = address[0]
+
+    # append url according to used transport
+    if transport_mode == "http":
+      beeline_url.append('httpPath={http_endpoint}')
+
+    # append url according to used auth
+    if hive_auth == "NOSASL":
+      beeline_url.append('auth=noSasl')
+
+    # append url according to principal
+    if kinit_cmd:
+      beeline_url.append('principal={key}')
+
+    exec_path = params.execute_path
+    if params.version and params.stack_root:
+      upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin")
+      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
+
+    # beeline path
+    llap_cmd = "! beeline -u '%s'" % format(";".join(beeline_url))
+    # Append LLAP SQL script path
+    llap_cmd += format(" --hiveconf \"hiveLlapServiceCheck={unique_id}\" -f {stack_root}/current/hive-server2-hive2/scripts/llap/sql/serviceCheckScript.sql")
+    # Append grep patterns for detecting failure
+    llap_cmd += " -e '' 2>&1| awk '{print}'|grep -i -e 'Invalid status\|Invalid URL\|command not found\|Connection refused'"
+
+    Execute(llap_cmd,
+            user=params.hive_user,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            tries=1,
+            wait_for_finish=True,
+            stderr=subprocess.PIPE,
+            logoutput=True)
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
\ No newline at end of file

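To make the beeline URL assembly in check_llap() above concrete, the fragment below joins the same pieces with ';' for made-up host, port, endpoint, and principal values. The original relies on resource_management's format(), which substitutes {placeholders} from the surrounding scope; this sketch uses plain str.format so it runs on its own.

# Worked example of the URL built by check_llap(); every value here is hypothetical.
address = "llap-host.example.com"
port = 10501
transport_mode = "http"
http_endpoint = "cliservice"
key = "hive/_HOST@EXAMPLE.COM"

beeline_url = ["jdbc:hive2://{0}:{1}/".format(address, port),
               "transportMode={0}".format(transport_mode)]
if transport_mode == "http":
    beeline_url.append("httpPath={0}".format(http_endpoint))
beeline_url.append("principal={0}".format(key))

print(";".join(beeline_url))
# jdbc:hive2://llap-host.example.com:10501/;transportMode=http;httpPath=cliservice;principal=hive/_HOST@EXAMPLE.COM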

[45/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
deleted file mode 100755
index 9e73118..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,372 +0,0 @@
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services><service>
-    <name>HIVE</name>
-    <displayName>Hive</displayName>
-    <version>1.2.1+odpi</version>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <components>
-        <component>
-            <name>HCAT</name>
-            <displayName>HCat Client</displayName>
-            <category>CLIENT</category>
-            <deleted>false</deleted>
-            <cardinality>0+</cardinality>
-            <versionAdvertised>false</versionAdvertised>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/hcat_client.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>0</timeout>
-            </commandScript>
-            <configFiles>
-                <configFile>
-                    <type>env</type>
-                    <fileName>hcat-env.sh</fileName>
-                    <dictionaryName>hcat-env</dictionaryName>
-                </configFile>
-            </configFiles>
-            <configuration-dependencies>
-                <config-type>hive-site</config-type>
-            </configuration-dependencies>
-            <recovery_enabled>false</recovery_enabled>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <displayName>HiveServer2</displayName>
-            <category>MASTER</category>
-            <deleted>false</deleted>
-            <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/hive_server.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>0</timeout>
-            </commandScript>
-            <logs>
-                <log>
-                    <logId>hive_hiveserver2</logId>
-                    <primary>true</primary>
-                </log>
-            </logs>
-            <dependencies>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>TEZ/TEZ_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/HIVE_SERVER</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/HIVE_SERVER</co-locate>
-                        <coLocate>HIVE/HIVE_SERVER</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-                    <scope>cluster</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>YARN/YARN_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-            </dependencies>
-            <configuration-dependencies>
-                <config-type>hiveserver2-site</config-type>
-                <config-type>hive-site</config-type>
-            </configuration-dependencies>
-            <recovery_enabled>false</recovery_enabled>
-            <reassignAllowed>true</reassignAllowed>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <displayName>Hive Client</displayName>
-            <category>CLIENT</category>
-            <deleted>false</deleted>
-            <cardinality>1+</cardinality>
-            <versionAdvertised>false</versionAdvertised>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/hive_client.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>0</timeout>
-            </commandScript>
-            <configFiles>
-                <configFile>
-                    <type>xml</type>
-                    <fileName>hive-site.xml</fileName>
-                    <dictionaryName>hive-site</dictionaryName>
-                </configFile>
-                <configFile>
-                    <type>env</type>
-                    <fileName>hive-env.sh</fileName>
-                    <dictionaryName>hive-env</dictionaryName>
-                </configFile>
-                <configFile>
-                    <type>env</type>
-                    <fileName>hive-log4j.properties</fileName>
-                    <dictionaryName>hive-log4j</dictionaryName>
-                </configFile>
-                <configFile>
-                    <type>env</type>
-                    <fileName>hive-exec-log4j.properties</fileName>
-                    <dictionaryName>hive-exec-log4j</dictionaryName>
-                </configFile>
-            </configFiles>
-            <configuration-dependencies>
-                <config-type>hive-site</config-type>
-            </configuration-dependencies>
-            <recovery_enabled>false</recovery_enabled>
-        </component>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <displayName>WebHCat Server</displayName>
-            <category>MASTER</category>
-            <deleted>false</deleted>
-            <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/webhcat_server.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>1200</timeout>
-            </commandScript>
-            <clientsToUpdateConfigs>
-                <client>HCAT</client>
-            </clientsToUpdateConfigs>
-            <dependencies>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>HDFS/HDFS_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>HIVE/HIVE_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-                        <coLocate>HIVE/WEBHCAT_SERVER</coLocate>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-                        <coLocate>HIVE/WEBHCAT_SERVER</coLocate>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-                    <scope>cluster</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>YARN/YARN_CLIENT</name>
-                    <scope>host</scope>
-                </dependency>
-                <dependency>
-                    <auto-deploy>
-                        <enabled>true</enabled>
-                    </auto-deploy>
-                    <autoDeploy>
-                        <enabled>true</enabled>
-                    </autoDeploy>
-                    <name>PIG/PIG</name>
-                    <scope>host</scope>
-                </dependency>
-            </dependencies>
-            <configuration-dependencies>
-                <config-type>hive-site</config-type>
-            </configuration-dependencies>
-            <recovery_enabled>false</recovery_enabled>
-            <reassignAllowed>true</reassignAllowed>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <displayName>MySQL Server</displayName>
-            <category>MASTER</category>
-            <deleted>false</deleted>
-            <cardinality>0-1</cardinality>
-            <versionAdvertised>false</versionAdvertised>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/mysql_server.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>0</timeout>
-            </commandScript>
-            <customCommands>
-                <customCommand>
-                    <name>CLEAN</name>
-                    <commandScript>
-                        <script>scripts/mysql_server.py</script>
-                        <scriptType>PYTHON</scriptType>
-                        <timeout>600</timeout>
-                    </commandScript>
-                    <background>false</background>
-                </customCommand>
-            </customCommands>
-            <recovery_enabled>false</recovery_enabled>
-            <reassignAllowed>true</reassignAllowed>
-        </component>
-        <component>
-            <name>HIVE_METASTORE</name>
-            <displayName>Hive Metastore</displayName>
-            <category>MASTER</category>
-            <deleted>false</deleted>
-            <cardinality>1+</cardinality>
-            <versionAdvertisedInternal>false</versionAdvertisedInternal>
-            <commandScript>
-                <script>scripts/hive_metastore.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>1200</timeout>
-            </commandScript>
-            <logs>
-                <log>
-                    <logId>hive_metastore</logId>
-                    <primary>true</primary>
-                </log>
-            </logs>
-            <configuration-dependencies>
-                <config-type>hive-site</config-type>
-            </configuration-dependencies>
-            <auto-deploy>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-                <coLocate>HIVE/HIVE_SERVER</coLocate>
-                <enabled>true</enabled>
-            </auto-deploy>
-            <recovery_enabled>false</recovery_enabled>
-            <reassignAllowed>true</reassignAllowed>
-        </component>
-    </components>
-    <configuration-dependencies>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>tez-site</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>hivemetastore-site.xml</config-type>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-        <config-type>mapred-site</config-type>
-    </configuration-dependencies>
-    <widgetsFileName>widgets.json</widgetsFileName>
-    <metricsFileName>metrics.json</metricsFileName>
-    <osSpecifics>
-        <osSpecific>
-            <osFamily>any</osFamily>
-            <packages>
-                <package>
-                    <name>mysql-connector-java</name>
-                    <condition>should_install_mysl_connector</condition>
-                    <skipUpgrade>true</skipUpgrade>
-                </package>
-            </packages>
-        </osSpecific>
-        <osSpecific>
-            <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
-            <packages>
-                <package>
-                    <name>hive</name>
-                    <skipUpgrade>false</skipUpgrade>
-                </package>
-                <package>
-                    <name>hive-hcatalog</name>
-                    <skipUpgrade>false</skipUpgrade>
-                </package>
-                <package>
-                    <name>hive-webhcat</name>
-                    <skipUpgrade>false</skipUpgrade>
-                </package>
-            </packages>
-        </osSpecific>
-        <osSpecific>
-            <osFamily>amazon2015,redhat6,suse11,suse12</osFamily>
-            <packages>
-                <package>
-                    <name>mysql</name>
-                    <condition>should_install_mysql</condition>
-                    <skipUpgrade>true</skipUpgrade>
-                </package>
-            </packages>
-        </osSpecific>
-        <osSpecific>
-            <osFamily>amazon2015,redhat6,debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-            <packages>
-                <package>
-                    <name>mysql-server</name>
-                    <condition>should_install_mysql</condition>
-                    <skipUpgrade>true</skipUpgrade>
-                </package>
-            </packages>
-        </osSpecific>
-    </osSpecifics>
-    <configuration-dir>configuration</configuration-dir>
-    <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-    </commandScript>
-    <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-        <service>YARN</service>
-    </requiredServices>
-</service></services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
deleted file mode 100755
index 6917160..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import socket
-import time
-import logging
-import traceback
-from resource_management.libraries.functions import hive_check
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
-CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
-
-HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.port}}'
-HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.http.port}}'
-HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY = '{{hive-interactive-site/hive.server2.authentication}}'
-HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
-HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
-SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
-SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
-HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
-HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
-
-# The configured Kerberos executable search paths, if any
-KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-
-THRIFT_PORT_DEFAULT = 10500
-HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT = 'binary'
-HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
-HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
-
-# default keytab location
-SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
-SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
-
-# default smoke principal
-SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
-SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
-
-# default smoke user
-SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
-SMOKEUSER_DEFAULT = 'ambari-qa'
-
-HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
-HADOOPUSER_DEFAULT = 'hadoop'
-
-CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
-CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
-
-logger = logging.getLogger('ambari_alerts')
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
-          HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY, HIVE_SERVER2_AUTHENTICATION_KEY,
-          HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY, SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY,
-          HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY, HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY,
-          KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL, HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def get_tokens():
-  pass
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  transport_mode = HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT
-  if HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY in configurations:
-    transport_mode = configurations[HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY]
-
-  port = THRIFT_PORT_DEFAULT
-  if transport_mode.lower() == 'binary' and HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY])
-  elif transport_mode.lower() == 'http' and HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY])
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
-  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
-    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
-
-  hive_server2_authentication = HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT
-  if HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY in configurations:
-    hive_server2_authentication = configurations[HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY]
-  elif HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
-    hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
-
-  hive_ssl = False
-  if HIVE_SSL in configurations:
-    hive_ssl = configurations[HIVE_SSL]
-
-  hive_ssl_keystore_path = None
-  if HIVE_SSL_KEYSTORE_PATH in configurations:
-    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
-
-  hive_ssl_keystore_password = None
-  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
-    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
-
-  # defaults
-  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
-  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
-  smokeuser = SMOKEUSER_DEFAULT
-
-  # check script params
-  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
-    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
-
-
-  # check configurations last as they should always take precedence
-  if SMOKEUSER_PRINCIPAL_KEY in configurations:
-    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  result_code = None
-
-  if security_enabled:
-    hive_server_principal = HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT
-    if HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY in configurations:
-      hive_server_principal = configurations[HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY]
-
-    if SMOKEUSER_KEYTAB_KEY in configurations:
-      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
-
-    # Get the configured Kerberos executable search paths, if any
-    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
-      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
-    else:
-      kerberos_executable_search_paths = None
-
-    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
-    kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
-  else:
-    hive_server_principal = None
-    kinitcmd=None
-
-  try:
-    if host_name is None:
-      host_name = socket.getfqdn()
-
-    start_time = time.time()
-
-    try:
-      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
-                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
-                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
-                                        check_command_timeout=int(check_command_timeout))
-      result_code = 'OK'
-      total_time = time.time() - start_time
-      label = OK_MESSAGE.format(total_time, port)
-    except:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return (result_code, [label])
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def execute(configurations={}, parameters={}, host_name=None):
-  pass
\ No newline at end of file

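A note on the interactive Thrift port alert removed above: its core is picking which hive-interactive-site port key to probe from the configured transport mode, then wrapping a single hive_check.check_thrift_port_sasl call in OK/CRITICAL/UNKNOWN handling. A minimal sketch of just the port-selection step, assuming 'configurations' is a plain dict of resolved {{site/property}} tokens (the helper name below is ours, not Ambari's):

    THRIFT_PORT_DEFAULT = 10500  # HiveServer2 Interactive default

    def pick_interactive_thrift_port(configurations):
        # binary transport -> hive.server2.thrift.port,
        # http transport   -> hive.server2.thrift.http.port
        mode = configurations.get('{{hive-site/hive.server2.transport.mode}}', 'binary').lower()
        if mode == 'http':
            key = '{{hive-interactive-site/hive.server2.thrift.http.port}}'
        else:
            key = '{{hive-interactive-site/hive.server2.thrift.port}}'
        return int(configurations.get(key, THRIFT_PORT_DEFAULT))

The full alert layers Kerberos (a kinit before the check when security is enabled) and SSL keystore options on top of this, but the port choice itself is only this lookup.
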
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
deleted file mode 100755
index e02ed5a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import socket
-import time
-import traceback
-import logging
-
-from resource_management.core import global_lock
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.core.resources import Execute
-from ambari_commons.os_check import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-
-OK_MESSAGE = "Metastore OK - Hive command took {0:.3f}s"
-CRITICAL_MESSAGE = "Metastore on {0} failed ({1})"
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
-SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-HIVE_METASTORE_URIS_KEY = '{{hive-site/hive.metastore.uris}}'
-
-# The configured Kerberos executable search paths, if any
-KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-
-# default keytab location
-SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
-SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
-
-# default smoke principal
-SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
-SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
-
-# default smoke user
-SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
-SMOKEUSER_DEFAULT = 'ambari-qa'
-
-STACK_ROOT = '{{cluster-env/stack_root}}'
-
-HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
-
-HIVE_BIN_DIR_LEGACY = '/usr/lib/hive/bin'
-
-CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
-CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
-
-HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
-HADOOPUSER_DEFAULT = 'hadoop'
-
-logger = logging.getLogger('ambari_alerts')
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
-    HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HIVE_METASTORE_URIS_KEY, HADOOPUSER_KEY)
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-
-  if not HIVE_METASTORE_URIS_KEY in configurations:
-    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))
-
-  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
-  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
-    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
-
-  # defaults
-  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
-  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
-  smokeuser = SMOKEUSER_DEFAULT
-
-  # check script params
-  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
-    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
-
-
-  # check configurations last as they should always take precedence
-  if SMOKEUSER_PRINCIPAL_KEY in configurations:
-    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  result_code = None
-
-  try:
-    if security_enabled:
-      if SMOKEUSER_KEYTAB_KEY in configurations:
-        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
-
-      # Get the configured Kerberos executable search paths, if any
-      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
-        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
-      else:
-        kerberos_executable_search_paths = None
-
-      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
-      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
-
-      # prevent concurrent kinit
-      kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
-      kinit_lock.acquire()
-      try:
-        Execute(kinitcmd, user=smokeuser,
-          path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
-          timeout=10)
-      finally:
-        kinit_lock.release()
-
-    if host_name is None:
-      host_name = socket.getfqdn()
-
-    for uri in metastore_uris:
-      if host_name in uri:
-        metastore_uri = uri
-
-    conf_dir = HIVE_CONF_DIR_LEGACY
-    bin_dir = HIVE_BIN_DIR_LEGACY
-
-
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf/conf.server")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
-
-      if os.path.exists(hive_conf_dir):
-        conf_dir = hive_conf_dir
-        bin_dir = hive_bin_dir
-
-    cmd = format("export HIVE_CONF_DIR='{conf_dir}' ; "
-                 "hive --hiveconf hive.metastore.uris={metastore_uri}\
-                 --hiveconf hive.metastore.client.connect.retry.delay=1\
-                 --hiveconf hive.metastore.failure.retries=1\
-                 --hiveconf hive.metastore.connect.retries=1\
-                 --hiveconf hive.metastore.client.socket.timeout=14\
-                 --hiveconf hive.execution.engine=mr -e 'show databases;'")
-
-    start_time = time.time()
-
-    try:
-      Execute(cmd, user=smokeuser,
-        path=["/bin/", "/usr/bin/", "/usr/sbin/", bin_dir],
-        timeout=int(check_command_timeout) )
-
-      total_time = time.time() - start_time
-
-      result_code = 'OK'
-      label = OK_MESSAGE.format(total_time)
-    except:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return ((result_code, [label]))
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  from resource_management.libraries.functions import reload_windows_env
-  reload_windows_env()
-  hive_home = os.environ['HIVE_HOME']
-
-  if configurations is None:
-    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
-  if not HIVE_METASTORE_URIS_KEY in configurations:
-    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))
-
-  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')
-
-  # defaults
-  hiveuser = HADOOPUSER_DEFAULT
-
-  if HADOOPUSER_KEY in configurations:
-    hiveuser = configurations[HADOOPUSER_KEY]
-
-  result_code = None
-  try:
-    if host_name is None:
-      host_name = socket.getfqdn()
-    for uri in metastore_uris:
-      if host_name in uri:
-        metastore_uri = uri
-
-    hive_cmd = os.path.join(hive_home, "bin", "hive.cmd")
-    cmd = format("cmd /c {hive_cmd} --hiveconf hive.metastore.uris={metastore_uri}\
-                 --hiveconf hive.metastore.client.connect.retry.delay=1\
-                 --hiveconf hive.metastore.failure.retries=1\
-                 --hiveconf hive.metastore.connect.retries=1\
-                 --hiveconf hive.metastore.client.socket.timeout=14\
-                 --hiveconf hive.execution.engine=mr -e 'show databases;'")
-    start_time = time.time()
-    try:
-      Execute(cmd, user=hiveuser, timeout=30)
-      total_time = time.time() - start_time
-      result_code = 'OK'
-      label = OK_MESSAGE.format(total_time)
-    except:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return ((result_code, [label]))

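The metastore alert removed above is, at heart, a timed "hive -e 'show databases;'" run as the smoke user against a single metastore URI, with retry and timeout settings clamped down so the check fails fast. A sketch of the command construction only, with 'metastore_uri' and 'conf_dir' standing in for the values the alert derives from hive.metastore.uris and the detected conf directory:

    def build_metastore_check_cmd(metastore_uri, conf_dir):
        # Clamp retries/timeouts so a broken metastore fails the check quickly
        # instead of hanging for the default retry budget.
        hiveconfs = [
            ('hive.metastore.uris', metastore_uri),
            ('hive.metastore.client.connect.retry.delay', '1'),
            ('hive.metastore.failure.retries', '1'),
            ('hive.metastore.connect.retries', '1'),
            ('hive.metastore.client.socket.timeout', '14'),
            ('hive.execution.engine', 'mr'),
        ]
        flags = ' '.join('--hiveconf %s=%s' % kv for kv in hiveconfs)
        return "export HIVE_CONF_DIR='%s' ; hive %s -e 'show databases;'" % (conf_dir, flags)

Timing that command and mapping success or failure to OK/CRITICAL is the rest of the alert; Kerberized clusters just add a kinit (under a global lock) before it runs.
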
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
deleted file mode 100755
index 32da1cc..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import socket
-import time
-import logging
-import traceback
-from resource_management.libraries.functions import hive_check
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
-CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
-
-HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
-HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{hive-site/hive.server2.thrift.http.port}}'
-HIVE_SERVER_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
-HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
-SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
-SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
-HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
-HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
-
-# The configured Kerberos executable search paths, if any
-KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-
-THRIFT_PORT_DEFAULT = 10000
-HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
-HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
-HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
-
-# default keytab location
-SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
-SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
-
-# default smoke principal
-SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
-SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
-
-# default smoke user
-SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
-SMOKEUSER_DEFAULT = 'ambari-qa'
-
-HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
-HADOOPUSER_DEFAULT = 'hadoop'
-
-CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
-CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
-
-logger = logging.getLogger('ambari_alerts')
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HIVE_SERVER_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
-          HIVE_SERVER2_AUTHENTICATION_KEY, HIVE_SERVER_PRINCIPAL_KEY,
-          SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
-          HIVE_SERVER_TRANSPORT_MODE_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL,
-          HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
-          HIVE_SERVER_TRANSPORT_MODE_KEY, HADOOPUSER_KEY)
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
-  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
-    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
-
-  port = THRIFT_PORT_DEFAULT
-  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
-  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
-  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
-    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
-
-  hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
-  if HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
-    hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
-
-  hive_ssl = False
-  if HIVE_SSL in configurations:
-    hive_ssl = configurations[HIVE_SSL]
-
-  hive_ssl_keystore_path = None
-  if HIVE_SSL_KEYSTORE_PATH in configurations:
-    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
-
-  hive_ssl_keystore_password = None
-  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
-    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
-
-  # defaults
-  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
-  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
-  smokeuser = SMOKEUSER_DEFAULT
-
-  # check script params
-  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
-    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
-
-  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
-    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
-
-
-  # check configurations last as they should always take precedence
-  if SMOKEUSER_PRINCIPAL_KEY in configurations:
-    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  result_code = None
-
-  if security_enabled:
-    hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
-    if HIVE_SERVER_PRINCIPAL_KEY in configurations:
-      hive_server_principal = configurations[HIVE_SERVER_PRINCIPAL_KEY]
-
-    if SMOKEUSER_KEYTAB_KEY in configurations:
-      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
-
-    # Get the configured Kerberos executable search paths, if any
-    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
-      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
-    else:
-      kerberos_executable_search_paths = None
-
-    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
-    kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
-  else:
-    hive_server_principal = None
-    kinitcmd=None
-
-  try:
-    if host_name is None:
-      host_name = socket.getfqdn()
-
-    start_time = time.time()
-
-    try:
-      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
-                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
-                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
-                                        check_command_timeout=int(check_command_timeout))
-      result_code = 'OK'
-      total_time = time.time() - start_time
-      label = OK_MESSAGE.format(total_time, port)
-    except:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
-
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return (result_code, [label])
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  from resource_management.libraries.functions import reload_windows_env
-  from resource_management.core.resources import Execute
-  reload_windows_env()
-  hive_home = os.environ['HIVE_HOME']
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
-  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
-    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
-
-  port = THRIFT_PORT_DEFAULT
-  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
-  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
-    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
-
-  hiveuser = HADOOPUSER_DEFAULT
-  if HADOOPUSER_KEY in configurations:
-    hiveuser = configurations[HADOOPUSER_KEY]
-
-  result_code = None
-  try:
-    if host_name is None:
-      host_name = socket.getfqdn()
-
-    beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
-    # append url according to used transport
-    if transport_mode == "http":
-      beeline_url.append('httpPath=cliservice')
-    beeline_url_string = format(";".join(beeline_url))
-    beeline_cmd = os.path.join(hive_home, "bin", "beeline.cmd")
-    cmd = format("cmd /c {beeline_cmd} -u {beeline_url_string} -e '' 2>&1 | findstr Connected")
-
-    start_time = time.time()
-    try:
-      Execute(cmd, user=hiveuser, timeout=30)
-      total_time = time.time() - start_time
-      result_code = 'OK'
-      label = OK_MESSAGE.format(total_time, port)
-    except:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
-  except:
-    label = traceback.format_exc()
-    result_code = 'UNKNOWN'
-
-  return (result_code, [label])

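The non-interactive Thrift alert above mirrors the interactive one; the part worth calling out is the Windows branch, which checks the port by driving beeline.cmd against a JDBC URL built from the transport mode. A sketch of that URL assembly, with the host and port values purely illustrative:

    def beeline_url(host_name, port, transport_mode):
        parts = ['jdbc:hive2://%s:%s/' % (host_name, port),
                 'transportMode=%s' % transport_mode]
        if transport_mode == 'http':
            # HTTP transport also needs the cliservice path segment
            parts.append('httpPath=cliservice')
        return ';'.join(parts)

    # beeline_url('hs2-host.example.com', 10001, 'http')
    # -> 'jdbc:hive2://hs2-host.example.com:10001/;transportMode=http;httpPath=cliservice'

The alert then filters beeline's output for 'Connected' (via findstr) to decide OK versus CRITICAL.
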
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
deleted file mode 100755
index 095be3f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import time
-import logging
-import traceback
-import json
-import subprocess
-
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from resource_management.core import shell
-from resource_management.core.resources import Execute
-from resource_management.core import global_lock
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.script.script import Script
-
-OK_MESSAGE = "The application reported a '{0}' state in {1:.3f}s"
-MESSAGE_WITH_STATE_AND_INSTANCES = "The application reported a '{0}' state in {1:.3f}s. [Live: {2}, Desired: {3}]"
-CRITICAL_MESSAGE_WITH_STATE = "The application reported a '{0}' state. Check took {1:.3f}s"
-CRITICAL_MESSAGE = "Application information could not be retrieved"
-
-# results codes
-CRITICAL_RESULT_CODE = 'CRITICAL'
-OK_RESULT_CODE = 'OK'
-UKNOWN_STATUS_CODE = 'UNKNOWN'
-
-
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-
-HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.zk.sm.principal}}'
-HIVE_PRINCIPAL_DEFAULT = 'default.hive.principal'
-
-HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.zk.sm.keytab.file}}'
-HIVE_PRINCIPAL_KEYTAB_DEFAULT = 'default.hive.keytab'
-
-HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
-
-HIVE_USER_KEY = '{{hive-env/hive_user}}'
-HIVE_USER_DEFAULT = 'default.smoke.user'
-
-STACK_ROOT = '{{cluster-env/stack_root}}'
-STACK_ROOT_DEFAULT = Script.get_stack_root()
-
-LLAP_APP_NAME_KEY = '{{hive-interactive-env/llap_app_name}}'
-LLAP_APP_NAME_DEFAULT = 'llap0'
-
-# The configured Kerberos executable search paths, if any
-KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-
-
-CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
-CHECK_COMMAND_TIMEOUT_DEFAULT = 120.0
-
-
-# Mapping of LLAP app states to 'user friendly' state names.
-llap_app_state_dict = {'RUNNING_ALL': 'RUNNING',
-                       'RUNNING_PARTIAL': 'RUNNING',
-                       'COMPLETE': 'NOT RUNNING',
-                       'LAUNCHING': 'LAUNCHING',
-                       'APP_NOT_FOUND': 'APP NOT FOUND'}
-
-logger = logging.getLogger('ambari_alerts')
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  LLAP_APP_STATUS_CMD_TIMEOUT = 0
-
-  if configurations is None:
-    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
-
-  result_code = None
-
-  try:
-    security_enabled = False
-    if SECURITY_ENABLED_KEY in configurations:
-      security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-    check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
-    if CHECK_COMMAND_TIMEOUT_KEY in configurations:
-      check_command_timeout = int(parameters[CHECK_COMMAND_TIMEOUT_KEY])
-
-    hive_user = HIVE_USER_DEFAULT
-    if HIVE_USER_KEY in configurations:
-      hive_user = configurations[HIVE_USER_KEY]
-
-    llap_app_name = LLAP_APP_NAME_DEFAULT
-    if LLAP_APP_NAME_KEY in configurations:
-      llap_app_name = configurations[LLAP_APP_NAME_KEY]
-
-    if security_enabled:
-      if HIVE_PRINCIPAL_KEY in configurations:
-        llap_principal = configurations[HIVE_PRINCIPAL_KEY]
-      else:
-        llap_principal = HIVE_PRINCIPAL_DEFAULT
-      llap_principal = llap_principal.replace('_HOST',host_name.lower())
-
-      llap_keytab = HIVE_PRINCIPAL_KEYTAB_DEFAULT
-      if HIVE_PRINCIPAL_KEYTAB_KEY in configurations:
-        llap_keytab = configurations[HIVE_PRINCIPAL_KEYTAB_KEY]
-
-      # Get the configured Kerberos executable search paths, if any
-      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
-        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
-      else:
-        kerberos_executable_search_paths = None
-
-      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
-      kinitcmd=format("{kinit_path_local} -kt {llap_keytab} {llap_principal}; ")
-
-      # prevent concurrent kinit
-      kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
-      kinit_lock.acquire()
-      try:
-        Execute(kinitcmd, user=hive_user,
-                path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
-                timeout=10)
-      finally:
-        kinit_lock.release()
-
-
-
-    start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
-    else:
-      llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
-
-    code, output, error = shell.checked_call(llap_status_cmd, user=hive_user, stderr=subprocess.PIPE,
-                                             timeout=check_command_timeout,
-                                             logoutput=False)
-    # Call for getting JSON
-    llap_app_info = make_valid_json(output)
-
-    if llap_app_info is None or 'state' not in llap_app_info:
-      alert_label = traceback.format_exc()
-      result_code = UKNOWN_STATUS_CODE
-      return (result_code, [alert_label])
-
-    retrieved_llap_app_state = llap_app_info['state'].upper()
-    if retrieved_llap_app_state in ['RUNNING_ALL']:
-      result_code = OK_RESULT_CODE
-      total_time = time.time() - start_time
-      alert_label = OK_MESSAGE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
-    elif retrieved_llap_app_state in ['RUNNING_PARTIAL']:
-      live_instances = 0
-      desired_instances = 0
-      percentInstancesUp = 0
-      percent_desired_instances_to_be_up = 80
-      # Get 'live' and 'desired' instances
-      if 'liveInstances' not in llap_app_info or 'desiredInstances' not in llap_app_info:
-        result_code = CRITICAL_RESULT_CODE
-        total_time = time.time() - start_time
-        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
-        return (result_code, [alert_label])
-
-      live_instances = llap_app_info['liveInstances']
-      desired_instances = llap_app_info['desiredInstances']
-      if live_instances < 0 or desired_instances <= 0:
-        result_code = CRITICAL_RESULT_CODE
-        total_time = time.time() - start_time
-        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
-        return (result_code, [alert_label])
-
-      percentInstancesUp = float(live_instances) / desired_instances * 100
-      if percentInstancesUp >= percent_desired_instances_to_be_up:
-        result_code = OK_RESULT_CODE
-        total_time = time.time() - start_time
-        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
-                                                              total_time,
-                                                              llap_app_info['liveInstances'],
-                                                              llap_app_info['desiredInstances'])
-      else:
-        result_code = CRITICAL_RESULT_CODE
-        total_time = time.time() - start_time
-        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
-                                                              total_time,
-                                                              llap_app_info['liveInstances'],
-                                                              llap_app_info['desiredInstances'])
-    else:
-      result_code = CRITICAL_RESULT_CODE
-      total_time = time.time() - start_time
-      alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
-  except:
-    alert_label = traceback.format_exc()
-    traceback.format_exc()
-    result_code = UKNOWN_STATUS_CODE
-  return (result_code, [alert_label])
-
-
-"""
-Remove extra lines from 'llapstatus' status output (e.g., because of MOTD logging) so that valid JSON data can be passed
-to the JSON converter.
-"""
-def make_valid_json(output):
-  '''
-
-  Note: It is assumed right now that extra lines will be only at the start and not at the end.
-
-  Sample expected JSON to be passed for 'loads' is either of the form :
-
-  Case 'A':
-  {
-      "amInfo" : {
-      "appName" : "llap0",
-      "appType" : "org-apache-slider",
-      "appId" : "APP1",
-      "containerId" : "container_1466036628595_0010_01_000001",
-      "hostname" : "hostName",
-      "amWebUrl" : "http://hostName:port/"
-    },
-    "state" : "LAUNCHING",
-    ....
-    "desiredInstances" : 1,
-    "liveInstances" : 0,
-    ....
-    ....
-  }
-
-  or
-
-  Case 'B':
-  {
-    "state" : "APP_NOT_FOUND"
-  }
-
-  '''
-  splits = output.split("\n")
-
-  len_splits = len(splits)
-  if (len_splits < 3):
-    raise Fail("Malformed JSON data received from 'llapstatus' command. Exiting ....")
-
-  marker_idx = None  # To detect where from to start reading for JSON data
-  for idx, split in enumerate(splits):
-    curr_elem = split.strip()
-    if idx + 2 > len_splits:
-      raise Fail(
-        "Iterated over the received 'llapstatus' command. Couldn't validate the received output for JSON parsing.")
-    next_elem = (splits[(idx + 1)]).strip()
-    if curr_elem == "{":
-      if next_elem == "\"amInfo\" : {" and (splits[len_splits - 1]).strip() == '}':
-        # For Case 'A'
-        marker_idx = idx
-        break;
-      elif idx + 3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
-        # For Case 'B'
-        marker_idx = idx
-        break;
-
-
-  # Remove extra logging from possible JSON output
-  if marker_idx is None:
-    raise Fail("Couldn't validate the received output for JSON parsing.")
-  else:
-    if marker_idx != 0:
-      del splits[0:marker_idx]
-
-  scanned_output = '\n'.join(splits)
-  llap_app_info = json.loads(scanned_output)
-  return llap_app_info
\ No newline at end of file

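Two pieces of the LLAP status alert above are easy to lose in the noise: make_valid_json() exists only to strip leading non-JSON lines (MOTD and similar logging) before json.loads, and the RUNNING_PARTIAL branch is just a live/desired percentage test against an 80% threshold. A sketch of that threshold check, under the same guard conditions as the original (the function name is ours):

    def partial_state_is_ok(live_instances, desired_instances, threshold_pct=80):
        # Negative live counts or a non-positive desired count are treated as CRITICAL.
        if live_instances < 0 or desired_instances <= 0:
            return False
        return float(live_instances) / desired_instances * 100 >= threshold_pct

    # partial_state_is_ok(4, 5) -> True  (80.0% of desired instances are live)
    # partial_state_is_ok(3, 5) -> False (60.0%)

RUNNING_ALL always reports OK, any other state reports CRITICAL, and parsing or execution failures fall through to UNKNOWN.
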
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
deleted file mode 100755
index c9575c0..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-import socket
-import time
-import urllib2
-import traceback
-import logging
-
-from resource_management.core.environment import Environment
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
-from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
-
-
-RESULT_CODE_OK = "OK"
-RESULT_CODE_CRITICAL = "CRITICAL"
-RESULT_CODE_UNKNOWN = "UNKNOWN"
-
-OK_MESSAGE = "WebHCat status was OK ({0:.3f}s response from {1})"
-CRITICAL_CONNECTION_MESSAGE = "Connection failed to {0}\n{1}"
-CRITICAL_HTTP_MESSAGE = "HTTP {0} response from {1} \n{2}"
-CRITICAL_WEBHCAT_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
-CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE = "Unable to determine WebHCat health from unexpected JSON response"
-
-TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-WEBHCAT_PRINCIPAL_KEY = '{{webhcat-site/templeton.kerberos.principal}}'
-WEBHCAT_KEYTAB_KEY = '{{webhcat-site/templeton.kerberos.keytab}}'
-
-SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
-SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-
-# The configured Kerberos executable search paths, if any
-KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
-
-WEBHCAT_OK_RESPONSE = 'ok'
-WEBHCAT_PORT_DEFAULT = 50111
-
-CONNECTION_TIMEOUT_KEY = 'connection.timeout'
-CONNECTION_TIMEOUT_DEFAULT = 5.0
-CURL_CONNECTION_TIMEOUT_DEFAULT = str(int(CONNECTION_TIMEOUT_DEFAULT))
-
-# default keytab location
-SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
-SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
-
-# default smoke principal
-SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
-SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
-
-# default smoke user
-SMOKEUSER_DEFAULT = 'ambari-qa'
-logger = logging.getLogger('ambari_alerts')
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (TEMPLETON_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
-          KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SMOKEUSER_KEY)
-
-
-def execute(configurations={}, parameters={}, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  configurations (dictionary): a mapping of configuration key to value
-  parameters (dictionary): a mapping of script parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  result_code = RESULT_CODE_UNKNOWN
-
-  if configurations is None:
-    return (result_code, ['There were no configurations supplied to the script.'])
-
-  webhcat_port = WEBHCAT_PORT_DEFAULT
-  if TEMPLETON_PORT_KEY in configurations:
-    webhcat_port = int(configurations[TEMPLETON_PORT_KEY])
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in configurations:
-    security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'
-
-  # parse script arguments
-  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
-  curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
-  if CONNECTION_TIMEOUT_KEY in parameters:
-    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
-    curl_connection_timeout = str(int(connection_timeout))
-
-
-  # the alert will always run on the webhcat host
-  if host_name is None:
-    host_name = socket.getfqdn()
-
-  smokeuser = SMOKEUSER_DEFAULT
-
-  if SMOKEUSER_KEY in configurations:
-    smokeuser = configurations[SMOKEUSER_KEY]
-
-  # webhcat always uses http, never SSL
-  query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)
-
-  # initialize
-  total_time = 0
-  json_response = {}
-
-  if security_enabled:
-    try:
-      # defaults
-      smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
-      smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
-
-      # check script params
-      if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
-        smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
-      if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
-        smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
-
-      # check configurations last as they should always take precedence
-      if SMOKEUSER_PRINCIPAL_KEY in configurations:
-        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
-      if SMOKEUSER_KEYTAB_KEY in configurations:
-        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
-
-      # Get the configured Kerberos executable search paths, if any
-      kerberos_executable_search_paths = None
-      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
-        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
-
-      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
-
-      env = Environment.get_instance()
-      stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
-        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
-        "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      # check the response code
-      response_code = int(stdout)
-
-      # 0 indicates no connection
-      if response_code == 0:
-        label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
-        return (RESULT_CODE_CRITICAL, [label])
-
-      # any other response aside from 200 is a problem
-      if response_code != 200:
-        label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url, traceback.format_exc())
-        return (RESULT_CODE_CRITICAL, [label])
-
-      # now that we have the http status and it was 200, get the content
-      stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
-        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
-        False, "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
-        kinit_timer_ms = kinit_timer_ms)
-
-      json_response = json.loads(stdout)
-    except:
-      return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
-  else:
-    url_response = None
-
-    try:
-      # execute the query for the JSON that includes WebHCat status
-      start_time = time.time()
-      url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
-      total_time = time.time() - start_time
-
-      json_response = json.loads(url_response.read())
-    except urllib2.HTTPError as httpError:
-      label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url, traceback.format_exc())
-      return (RESULT_CODE_CRITICAL, [label])
-    except:
-      label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
-      return (RESULT_CODE_CRITICAL, [label])
-    finally:
-      if url_response is not None:
-        try:
-          url_response.close()
-        except:
-          pass
-
-
-  # if status is not in the response, we can't do any check; return CRIT
-  if 'status' not in json_response:
-    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + str(json_response)])
-
-
-  # URL response received, parse it
-  try:
-    webhcat_status = json_response['status']
-  except:
-    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + "\n" + traceback.format_exc()])
-
-
-  # proper JSON received, compare against known value
-  if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
-    result_code = RESULT_CODE_OK
-    label = OK_MESSAGE.format(total_time, query_url)
-  else:
-    result_code = RESULT_CODE_CRITICAL
-    label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)
-
-  return (result_code, [label])
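
The alert script above boils down to an HTTP probe of the Templeton status endpoint. A minimal, self-contained sketch of the unsecured path it implements (Python 2 style, matching the script; the host, port and smoke user are placeholder assumptions, not values taken from the patch):

import json
import time
import urllib2

def check_webhcat(host="localhost", port=50111, user="ambari-qa", timeout=5.0):
  url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host, port, user)
  start = time.time()
  try:
    response = urllib2.urlopen(url, timeout=timeout)
    payload = json.loads(response.read())
  except Exception as e:
    # covers connection failures as well as non-2xx HTTP responses
    return ("CRITICAL", "Connection failed to {0}\n{1}".format(url, e))
  elapsed = time.time() - start
  if str(payload.get("status", "")).lower() == "ok":
    return ("OK", "WebHCat status was OK ({0:.3f}s response from {1})".format(elapsed, url))
  return ("CRITICAL", 'WebHCat returned an unexpected status of "{0}"'.format(payload.get("status")))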


[02/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100755
index 0000000..424157b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,98 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+# Python imports
+import os
+import sys
+
+# Local imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from yarn import yarn
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+
+
+class MapReduce2Client(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, config_dir=None, upgrade_type=None):
+    """
+    :param env: Python environment
+    :param config_dir: During rolling upgrade, which config directory to save configs to.
+    """
+    import params
+    env.set_params(params)
+    yarn(config_dir=config_dir)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def stack_upgrade_save_new_config(self, env):
+    """
+    Because this gets called during a Rolling Upgrade, the new mapreduce configs have already been saved, so we must be
+    careful to only call configure() on the directory of the new version.
+    :param env:
+    """
+    import params
+    env.set_params(params)
+
+    conf_select_name = "hadoop"
+    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
+
+    if config_dir:
+      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
+
+      # Because this script was called from ru_execute_tasks.py, which already enters an Environment with its own basedir,
+      # we must change it now so this function can find the Jinja templates for the service.
+      env.config.basedir = base_dir
+      conf_select.select(params.stack_name, conf_select_name, params.version)
+      self.configure(env, config_dir=config_dir)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class MapReduce2ClientWindows(MapReduce2Client):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class MapReduce2ClientDefault(MapReduce2Client):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()
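
The Windows/default split at the bottom of this client relies on ambari_commons' @OsFamilyImpl decorator to pick the right subclass for the host OS at runtime. A simplified, self-contained illustration of that dispatch pattern (this is not the real ambari_commons implementation; the class and family names below are made up for the example):

import platform

_IMPLS = {}

def os_family_impl(os_family):
  """Register a subclass as the implementation of its base class for one OS family."""
  def register(cls):
    _IMPLS.setdefault(cls.__bases__[0], {})[os_family] = cls
    return cls
  return register

def resolve(base_cls, os_family=None):
  """Return the registered implementation for the OS family, or the base class itself."""
  if os_family is None:
    os_family = "winsrv" if platform.system() == "Windows" else "default"
  impls = _IMPLS.get(base_cls, {})
  return impls.get(os_family, impls.get("default", base_cls))

class Client(object):
  def component_name(self):
    return "generic-client"

@os_family_impl("winsrv")
class ClientWindows(Client):
  def component_name(self):
    return "windows-client"

# resolve(Client, "winsrv")().component_name()  -> "windows-client"
# resolve(Client, "default")().component_name() -> "generic-client"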

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
new file mode 100755
index 0000000..b235cad
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,161 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import nodemanager_upgrade
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('nodemanager',action='stop')
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',action='start')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="nodemanager")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class NodemanagerWindows(Nodemanager):
+  def status(self, env):
+    service('nodemanager', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class NodemanagerDefault(Nodemanager):
+  def get_component_name(self):
+    return "hadoop-yarn-nodemanager"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-nodemanager", params.version)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    nodemanager_upgrade.post_upgrade_check()
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.nodemanager.principal",
+                           "yarn.nodemanager.keytab",
+                           "yarn.nodemanager.webapp.spnego-principal",
+                           "yarn.nodemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.nodemanager.keytab",
+                          "yarn.nodemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.keytab'],
+                                security_params['yarn-site']['yarn.nodemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+
+if __name__ == "__main__":
+  Nodemanager().execute()
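
security_status() above leans on build_expectations() and validate_security_config_properties() to confirm that yarn-site carries the Kerberos settings a secured NodeManager needs. A condensed stand-in for that check, using plain dicts instead of the resource_management helpers (illustration only):

def validate_yarn_site(yarn_site):
  value_checks = {"yarn.timeline-service.http-authentication.type": "kerberos",
                  "yarn.acl.enable": "true"}
  must_not_be_empty = ["yarn.nodemanager.principal",
                       "yarn.nodemanager.keytab",
                       "yarn.nodemanager.webapp.spnego-principal",
                       "yarn.nodemanager.webapp.spnego-keytab-file"]
  issues = []
  for prop, expected in value_checks.items():
    if yarn_site.get(prop) != expected:
      issues.append("%s should be set to %s" % (prop, expected))
  for prop in must_not_be_empty:
    if not yarn_site.get(prop):
      issues.append("%s is not set" % prop)
  return issues  # an empty list means the configuration looks secured

# validate_yarn_site({"yarn.acl.enable": "true"}) returns the list of missing or incorrect properties.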

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
new file mode 100755
index 0000000..1c886f9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
@@ -0,0 +1,73 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import subprocess
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.show_logs import show_logs
+
+
+def post_upgrade_check():
+  '''
+  Checks that the NodeManager has rejoined the cluster.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  '''
+  import params
+
+  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
+  if params.security_enabled and params.nodemanager_kinit_cmd:
+    Execute(params.nodemanager_kinit_cmd, user=params.yarn_user)
+
+  try:
+    _check_nodemanager_startup()
+  except Fail:
+    show_logs(params.yarn_log_dir, params.yarn_user)
+    raise
+    
+
+@retry(times=30, sleep_time=10, err_class=Fail)
+def _check_nodemanager_startup():
+  '''
+  Checks that a NodeManager is in a RUNNING state in the cluster via
+  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  '''
+  import params
+  import socket
+
+  command = 'yarn node -list -states=RUNNING'
+  return_code, yarn_output = shell.checked_call(command, user=params.yarn_user)
+  
+  hostname = params.hostname.lower()
+  hostname_ip = socket.gethostbyname(params.hostname.lower())
+  nodemanager_address = params.nm_address.lower()
+  yarn_output = yarn_output.lower()
+
+  if hostname in yarn_output or nodemanager_address in yarn_output or hostname_ip in yarn_output:
+    Logger.info('NodeManager with ID \'{0}\' has rejoined the cluster.'.format(nodemanager_address))
+    return
+  else:
+    raise Fail('NodeManager with ID \'{0}\' was not found in the list of running NodeManagers. \'{1}\' output was:\n{2}'.format(nodemanager_address, command, yarn_output))
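
_check_nodemanager_startup() is wrapped in @retry(times=30, sleep_time=10, err_class=Fail), so the "yarn node -list" probe is repeated every ten seconds for up to thirty attempts before the upgrade check gives up. A minimal stand-in for such a decorator, shown only to illustrate the mechanism (the real resource_management decorator may differ in details):

import time
import functools

def retry(times, sleep_time, err_class):
  def wrap(func):
    @functools.wraps(func)
    def inner(*args, **kwargs):
      last_error = None
      for _ in range(times):  # assumes times >= 1
        try:
          return func(*args, **kwargs)
        except err_class as e:
          last_error = e
          time.sleep(sleep_time)
      raise last_error
    return inner
  return wrap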

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
new file mode 100755
index 0000000..073e84f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
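
Both lines above use default("/path/in/command/json", fallback), which roughly walks a '/'-separated path through the nested command dictionary and returns the fallback when any segment is missing. A simplified stand-in for that lookup (not the real resource_management helper):

def lookup(config, path, fallback=None):
  node = config
  for segment in path.strip("/").split("/"):
    if not isinstance(node, dict) or segment not in node:
      return fallback
    node = node[segment]
  return node

# lookup({"commandParams": {"version": "2.7.3"}}, "/commandParams/version")  -> "2.7.3"
# lookup({}, "/hostLevelParams/host_sys_prepped", False)                     -> False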

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
new file mode 100755
index 0000000..4d42861
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_linux.py
@@ -0,0 +1,469 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries import functions
+from resource_management.libraries.functions import is_empty
+
+import status_params
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+MAPR_SERVER_ROLE_DIRECTORY_MAP = {
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
+}
+
+YARN_SERVER_ROLE_DIRECTORY_MAP = {
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'YARN_CLIENT' : 'hadoop-yarn-client'
+}
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = status_params.stack_name
+stack_root = Script.get_stack_root()
+tarball_map = default("/configurations/cluster-env/tarball_map", None)
+
+config_path = os.path.join(stack_root, "current/hadoop-client/conf")
+config_dir = os.path.realpath(config_path)
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted_major = format_stack_version(stack_version_unformatted)
+stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
+
+stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
+stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
+
+# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+hostname = config['hostname']
+
+# hadoop default parameters
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+
+# hadoop parameters for stacks supporting rolling_upgrade
+if stack_supports_ru:
+  # MapR directory root
+  mapred_role_root = "hadoop-mapreduce-client"
+  command_role = default("/role", "")
+  if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP:
+    mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  # YARN directory root
+  yarn_role_root = "hadoop-yarn-client"
+  if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
+    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
+
+  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
+  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
+
+  hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
+  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
+  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
+
+if stack_supports_timeline_state_store:
+  # Timeline Service property that was added with the timeline_state_store stack feature
+  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
+
+# ats 1.5 properties
+entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
+entity_groupfs_active_dir_mode = 01777
+entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
+entity_groupfs_store_dir_mode = 0700
+
+hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
+
+limits_conf_dir = "/etc/security/limits.d"
+yarn_user_nofile_limit = default("/configurations/yarn-env/yarn_user_nofile_limit", "32768")
+yarn_user_nproc_limit = default("/configurations/yarn-env/yarn_user_nproc_limit", "65536")
+
+mapred_user_nofile_limit = default("/configurations/mapred-env/mapred_user_nofile_limit", "32768")
+mapred_user_nproc_limit = default("/configurations/mapred-env/mapred_user_nproc_limit", "65536")
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
+
+ulimit_cmd = "ulimit -c unlimited;"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_hdfs_user_mode = 0770
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+nm_security_marker_dir = "/var/lib/hadoop-yarn"
+nm_security_marker = format('{nm_security_marker_dir}/nm_security_enabled')
+current_nm_security_state = os.path.isfile(nm_security_marker)
+toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled)
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+yarn_nodemanager_container_executor_class =  config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class']
+is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
+container_executor_mode = 06050 if is_linux_container_executor else 02050
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
+yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
+rm_hosts = config['clusterHostInfo']['rm_host']
+rm_host = rm_hosts[0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
+# TODO UPGRADE default, update site during upgrade
+rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+ats_leveldb_lock_file = os.path.join(ats_leveldb_dir, "leveldb-timeline-store.ldb", "LOCK")
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
+yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
+service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
+
+if len(rm_hosts) > 1:
+  additional_rm_host = rm_hosts[1]
+  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
+else:
+  rm_webui_address = format("{rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
+if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
+  nm_address = nm_address.replace("0.0.0.0", hostname)
+
+# Initialize lists of work directories.
+nm_local_dirs = default("/configurations/yarn-site/yarn.nodemanager.local-dirs", "")
+nm_log_dirs = default("/configurations/yarn-site/yarn.nodemanager.log-dirs", "")
+
+nm_local_dirs_list = nm_local_dirs.split(',')
+nm_log_dirs_list = nm_log_dirs.split(',')
+
+nm_log_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_log_dir_mount.hist"
+nm_local_dir_to_mount_file = "/var/lib/ambari-agent/data/yarn/yarn_local_dir_mount.hist"
+
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+entity_file_history_directory = "/tmp/entity-file-history/active"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#exclude file
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = not len(ats_host) == 0
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+
+# don't use len(nm_hosts) here, because the check can take too much time on large clusters
+number_of_nm = 1
+
+# default kinit commands
+rm_kinit_cmd = ""
+yarn_timelineservice_kinit_cmd = ""
+nodemanager_kinit_cmd = ""
+
+if security_enabled:
+  rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
+  rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
+  rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
+  rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
+
+  # YARN timeline security options
+  if has_ats:
+    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
+
+  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
+    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
+    if _nodemanager_principal_name:
+      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
+
+    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
+    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
+
+
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
+jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
+
+# Tez-related properties
+tez_user = config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+app_dir_files = {tez_local_api_jars:None}
+
+# Tez libraries
+tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
+
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
+
+# Path to file that contains list of HDFS resources to be skipped during processing
+hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+
+import functools
+# create partial functions with common arguments for every HdfsResource call;
+# to create/delete an HDFS directory/file or copy from local, call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+ )
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
+
+# Node labels
+node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
+node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enabled']
+
+cgroups_dir = "/cgroups_test/cpu"
+
+# ***********************  RANGER PLUGIN CHANGES ***********************
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+# hostname of the active HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+if dfs_ha_namenode_active is not None: 
+  namenode_hostname = dfs_ha_namenode_active
+else:
+  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
+
+ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
+
+scheme = 'http' if not yarn_https_on else 'https'
+yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
+rm_active_port = rm_https_port if yarn_https_on else rm_port
+
+rm_ha_enabled = False
+rm_ha_ids_list = []
+rm_webapp_addresses_list = [yarn_rm_address]
+rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None)
+
+if rm_ha_ids:
+  rm_ha_ids_list = rm_ha_ids.split(",")
+  if len(rm_ha_ids_list) > 1:
+    rm_ha_enabled = True
+
+if rm_ha_enabled:
+  rm_webapp_addresses_list = []
+  for rm_id in rm_ha_ids_list:
+    rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}')
+    rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
+    rm_webapp_addresses_list.append(rm_webapp_address)
+
+#ranger yarn properties
+if has_ranger_admin:
+  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
+
+  if is_supported_yarn_ranger:
+    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
+    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
+      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+    xa_audit_db_password = ''
+    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+    xa_db_host = config['configurations']['admin-properties']['db_host']
+    repo_name = str(config['clusterName']) + '_yarn'
+
+    ranger_env = config['configurations']['ranger-env']
+    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
+    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
+    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
+
+    ranger_plugin_config = {
+      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
+      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
+    }
+
+    yarn_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': ranger_plugin_config,
+      'description': 'yarn repo',
+      'name': repo_name,
+      'repositoryType': 'yarn',
+      'type': 'yarn',
+      'assetType': '1'
+    }
+
+    if stack_supports_ranger_kerberos:
+      ranger_plugin_config['ambari.service.check.user'] = policy_user
+      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
+
+    if stack_supports_ranger_kerberos and security_enabled:
+      ranger_plugin_config['policy.download.auth.users'] = yarn_user
+      ranger_plugin_config['tag.download.auth.users'] = yarn_user
+
+    #For curl command in ranger plugin to get db connector
+    jdk_location = config['hostLevelParams']['jdk_location']
+    java_share_dir = '/usr/share/java'
+    previous_jdbc_jar_name = None
+    if stack_supports_ranger_audit_db:
+      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
+        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+        jdbc_driver = "com.mysql.jdbc.Driver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
+        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+        colon_count = xa_db_host.count(':')
+        if colon_count == 2 or colon_count == 0:
+          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+        else:
+          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+        jdbc_driver = "oracle.jdbc.OracleDriver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
+        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+        jdbc_driver = "org.postgresql.Driver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
+        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
+        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+
+    xa_audit_db_is_enabled = False
+    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+    if xml_configurations_supported and stack_supports_ranger_audit_db:
+      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
+    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+    #For SQLA explicitly disable audit to DB for Ranger
+    if xa_audit_db_flavor == 'sqla':
+      xa_audit_db_is_enabled = False
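
One piece of this params file that is easy to misread is the ResourceManager HA handling: yarn.resourcemanager.ha.rm-ids is expanded into one webapp address per RM id, switching between the http and https property names depending on yarn.http.policy. A self-contained sketch of that resolution (the yarn-site dict in the usage comment is invented for the example):

def rm_webapp_addresses(yarn_site, https_on=False):
  ha_ids = (yarn_site.get("yarn.resourcemanager.ha.rm-ids") or "").split(",")
  ha_ids = [rm_id.strip() for rm_id in ha_ids if rm_id.strip()]
  if len(ha_ids) < 2:
    key = ("yarn.resourcemanager.webapp.https.address" if https_on
           else "yarn.resourcemanager.webapp.address")
    return [yarn_site[key]]
  prefix = ("yarn.resourcemanager.webapp.https.address." if https_on
            else "yarn.resourcemanager.webapp.address.")
  return [yarn_site[prefix + rm_id] for rm_id in ha_ids]

# rm_webapp_addresses({"yarn.resourcemanager.ha.rm-ids": "rm1,rm2",
#                      "yarn.resourcemanager.webapp.address.rm1": "host1:8088",
#                      "yarn.resourcemanager.webapp.address.rm2": "host2:8088"})
# -> ["host1:8088", "host2:8088"]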

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
new file mode 100755
index 0000000..0f8ce73
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/params_windows.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries import functions
+import os
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+yarn_user = hadoop_user
+hdfs_user = hadoop_user
+smokeuser = hadoop_user
+config_dir = os.environ["HADOOP_CONF_DIR"]
+hadoop_home = os.environ["HADOOP_HOME"]
+
+yarn_home = os.environ["HADOOP_YARN_HOME"]
+
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+hs_host = config['clusterHostInfo']['hs_host'][0]
+hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
+hs_webui_address = format("{hs_host}:{hs_port}")
+
+hadoop_mapred2_jar_location = os.path.join(os.environ["HADOOP_COMMON_HOME"], "share", "hadoop", "mapreduce")
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+update_exclude_file_only = config['commandParams']['update_exclude_file_only']

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
new file mode 100755
index 0000000..6a7eea7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,289 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.decorator import retry
+from resource_management.core.resources.system import File, Execute
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
+from resource_management import is_empty
+from resource_management import shell
+
+
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from setup_ranger_yarn import setup_ranger_yarn
+
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('resourcemanager', action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name='resourcemanager')
+
+  def refreshqueues(self, env):
+    pass
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ResourcemanagerWindows(Resourcemanager):
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    service('resourcemanager', action='start')
+
+  def status(self, env):
+    service('resourcemanager', action='status')
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    yarn_user = params.yarn_user
+
+    yarn_refresh_cmd = format("cmd /c yarn rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         mode="f"
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd, user=yarn_user)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ResourcemanagerDefault(Resourcemanager):
+  def get_component_name(self):
+    return "hadoop-yarn-resourcemanager"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    if params.has_ranger_admin and params.is_supported_yarn_ranger:
+      setup_ranger_yarn() #Ranger Yarn Plugin related calls
+
+    # wait for active-dir and done-dir to be created by ATS if needed
+    if params.has_ats:
+      Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.")
+      self.wait_for_dfs_directories_created(params.entity_groupfs_store_dir, params.entity_groupfs_active_dir)
+
+    service('resourcemanager', action='start')
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.resourcemanager.principal",
+                           "yarn.resourcemanager.keytab",
+                           "yarn.resourcemanager.webapp.spnego-principal",
+                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.resourcemanager.keytab",
+                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations = {}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('yarn-site' not in security_params
+              or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
+              or 'yarn.resourcemanager.principal' not in security_params['yarn-site']
+              or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site']
+              or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
+                                security_params['yarn-site']['yarn.resourcemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def refreshqueues(self, env):
+    import params
+
+    self.configure(env)
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='refreshQueues'
+    )
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    rm_kinit_cmd = params.rm_kinit_cmd
+    yarn_user = params.yarn_user
+    conf_dir = params.hadoop_conf_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
+
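+    # Regenerate the exclude-hosts file from its template, then ask the
+    # ResourceManager to re-read it via rmadmin -refreshNodes unless only the
+    # exclude file itself should be updated.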
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    if not params.update_exclude_file_only:
+      Execute(yarn_refresh_cmd,
+              environment={'PATH': params.execute_path},
+              user=yarn_user)
+
+
+
+
+  def wait_for_dfs_directories_created(self, *dirs):
+    import params
+
+    ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file)
+
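+    # With security enabled, obtain Kerberos tickets for the yarn and hdfs
+    # users before probing HDFS for the ATS directories.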
+    if params.security_enabled:
+      Execute(params.rm_kinit_cmd,
+              user=params.yarn_user
+      )
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+
+    for dir_path in dirs:
+      self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs)
+
+
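+  # Retried up to 8 times with a 20 second pause (no backoff), re-raising
+  # Fail only if the DFS directory still has not appeared.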
+  @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail)
+  def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs):
+    import params
+
+
+    if not is_empty(dir_path):
+      dir_path = HdfsResourceProvider.parse_path(dir_path)
+
+      if dir_path in ignored_dfs_dirs:
+        Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.")
+        return
+
+      Logger.info("Verifying if DFS directory '" + dir_path + "' exists.")
+
+      dir_exists = None
+
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # checking via WebHDFS is much faster than running hdfs dfs -test
+        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
+        list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        dir_exists = ('FileStatus' in list_status)
+      else:
+        # fall back to the slower, more expensive hdfs dfs -test -d check
+        dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.yarn_user)[0]
+        dir_exists = not dfs_ret_code  # dfs -test -d returns 0 if the directory exists
+
+      if not dir_exists:
+        raise Fail("DFS directory '" + dir_path + "' does not exist !")
+      else:
+        Logger.info("DFS directory '" + dir_path + "' exists.")
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+  
+if __name__ == "__main__":
+  Resourcemanager().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
new file mode 100755
index 0000000..b1179b9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service.py
@@ -0,0 +1,105 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.show_logs import show_logs
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def service(componentName, action='start', serviceName='yarn'):
+  import status_params
+  if status_params.service_map.has_key(componentName):
+    service_name = status_params.service_map[componentName]
+    if action == 'start' or action == 'stop':
+      Service(service_name, action=action)
+    elif action == 'status':
+      check_windows_service_status(service_name)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def service(componentName, action='start', serviceName='yarn'):
+  import params
+
+  if serviceName == 'mapreduce' and componentName == 'historyserver':
+    delete_pid_file = True
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
+    usr = params.mapred_user
+    log_dir = params.mapred_log_dir
+  else:
+    # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script
+    # may not work correctly when stopping the service
+    delete_pid_file = False
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
+    usr = params.yarn_user
+    log_dir = params.yarn_log_dir
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
+    check_process = as_user(format("ls {pid_file} && ps -p `cat {pid_file}`"), user=usr)
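+    # check_process succeeds only when the pid file exists and the process it
+    # references is alive; it is reused below both as a guard condition and as
+    # a post-start health probe.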
+
+    # Remove the pid file if its corresponding process is not running.
+    File(pid_file, action = "delete", not_if = check_process)
+
+    if componentName == 'timelineserver' and serviceName == 'yarn':
+      File(params.ats_leveldb_lock_file,
+         action = "delete",
+         only_if = format("ls {params.ats_leveldb_lock_file}"),
+         not_if = check_process,
+         ignore_failures = True
+      )
+
+    try:
+      # Attempt to start the process. Internally, this is skipped if the process is already running.
+      Execute(daemon_cmd, user = usr, not_if = check_process)
+  
+      # Ensure that the process with the expected PID exists.
+      Execute(check_process,
+              not_if = check_process,
+              tries=5,
+              try_sleep=1,
+      )
+    except:
+      show_logs(log_dir, usr)
+      raise
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {componentName}")
+    try:
+      Execute(daemon_cmd, user=usr)
+    except:
+      show_logs(log_dir, usr)
+      raise
+
+    # !!! yarn-daemon doesn't need us to delete PIDs
+    if delete_pid_file:
+      File(pid_file, action="delete")
+
+
+  elif action == 'refreshQueues':
+    rm_kinit_cmd = params.rm_kinit_cmd
+    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
+    Execute(refresh_cmd, user=usr)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
new file mode 100755
index 0000000..daa8e7e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,159 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import os
+import sys
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same API
+import re
+import subprocess
+from ambari_commons import os_utils
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+
+CURL_CONNECTION_TIMEOUT = '5'
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ServiceCheckWindows(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    yarn_exe = os_utils.quote_path(os.path.join(params.yarn_home, "bin", "yarn.cmd"))
+
+    run_yarn_check_cmd = "cmd /C %s node -list" % yarn_exe
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    #temp_dir = os.path.abspath(os.path.join(params.hadoop_home, os.pardir)), "/tmp"
+    temp_dir = os.path.join(os.path.dirname(params.hadoop_home), "temp")
+    validateStatusFileName = "validateYarnComponentStatusWindows.py"
+    validateStatusFilePath = os.path.join(temp_dir, validateStatusFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "%s %s %s -p %s -s %s" % (python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
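+    # The helper script is copied into the temp directory below and probes the
+    # ResourceManager web endpoint (HTTP or HTTPS) to validate its status.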
+
+    if params.security_enabled:
+      kinit_cmd = "%s -kt %s %s;" % (params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+      smoke_cmd = kinit_cmd + ' ' + validateStatusCmd
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName)
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd, logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ServiceCheckDefault(ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    params.HdfsResource(format("/user/{smokeuser}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.smokeuser,
+                        mode=params.smoke_hdfs_user_mode,
+                        )
+
+    if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
+      path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
+    else:
+      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
+
+    yarn_distributed_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
+                                           "-shell_command", "ls", "-num_containers", "{number_of_nm}",
+                                           "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
+                                           "--queue", "{service_check_queue_name}"]
+    yarn_distributed_shell_check_cmd = format(" ".join(yarn_distributed_shell_check_params))
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {yarn_distributed_shell_check_cmd}")
+    else:
+      smoke_cmd = yarn_distributed_shell_check_cmd
+
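+    # Run the DistributedShell smoke job as the smoke user; the application id
+    # is parsed from the appTrackingUrl printed on stdout.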
+    return_code, out = shell.checked_call(smoke_cmd,
+                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                          user=params.smokeuser,
+                                          )
+
+    m = re.search("appTrackingUrl=(.*),\s", out)
+    app_url = m.group(1)
+
+    splitted_app_url = str(app_url).split('/')
+
+    for item in splitted_app_url:
+      if "application" in item:
+        application_name = item
+
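+    # Poll each configured RM web endpoint for the application report; a
+    # standby RM that merely redirects is skipped, and the job must finish
+    # with state FINISHED and final status SUCCEEDED.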
+    for rm_webapp_address in params.rm_webapp_addresses_list:
+      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+
+      get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+
+      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
+                                            user=params.smokeuser,
+                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                            )
+      
+      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
+      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
+        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
+        continue
+
+      try:
+        json_response = json.loads(stdout)
+      except Exception as e:
+        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
+      
+      if json_response is None or 'app' not in json_response or \
+              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
+        raise Fail("Application " + app_url + " returns invalid data.")
+
+      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+
+
+
+if __name__ == "__main__":
+  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
new file mode 100755
index 0000000..6ea7f82
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_yarn():
+  import params
+
+  if params.has_ranger_admin:
+
+    from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+
+    if params.retryAble:
+      Logger.info("YARN: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("YARN: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
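+    # When HDFS auditing is enabled, pre-create /ranger/audit and
+    # /ranger/audit/yarn so the Ranger YARN plugin can write its audit logs.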
+    if params.xml_configurations_supported and params.enable_ranger_yarn and params.xa_audit_hdfs_is_enabled:
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/yarn",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.yarn_user,
+                         group=params.yarn_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    setup_ranger_plugin('hadoop-yarn-resourcemanager', 'yarn', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java64_home,
+                        params.repo_name, params.yarn_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
+                        component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
+                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
+                        component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        api_version = 'v2', skip_if_rangeradmin_down= not params.retryAble,
+                        is_security_enabled = params.security_enabled,
+                        is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                        component_user_principal=params.rm_principal_name if params.security_enabled else None,
+                        component_user_keytab=params.rm_keytab if params.security_enabled else None
+      )
+  else:
+    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
new file mode 100755
index 0000000..c2e9d92
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries import functions
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from ambari_commons import OSCheck
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+if OSCheck.is_windows_family():
+  resourcemanager_win_service_name = 'resourcemanager'
+  nodemanager_win_service_name = 'nodemanager'
+  historyserver_win_service_name = 'historyserver'
+  timelineserver_win_service_name = 'timelineserver'
+
+  service_map = {
+    'resourcemanager' : resourcemanager_win_service_name,
+    'nodemanager' : nodemanager_win_service_name,
+    'historyserver' : historyserver_win_service_name,
+    'timelineserver' : timelineserver_win_service_name
+  }
+else:
+  mapred_user = config['configurations']['mapred-env']['mapred_user']
+  yarn_user = config['configurations']['yarn-env']['yarn_user']
+  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
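+  # Pid files follow the <service>-<user>-<component>.pid convention of the
+  # hadoop daemon scripts; the timeline server pid supersedes the deprecated
+  # historyserver name.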
+  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+
+  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
+
+  hostname = config['hostname']
+  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file


[51/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
deleted file mode 100755
index fc2c61f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,318 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-
-config = Script.get_config()
-
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-dfs_type = default("/commandParams/dfs_type", "")
-hadoop_conf_dir = "/etc/hadoop/conf"
-
-component_list = default("/localComponents", [])
-
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = '/usr'
-create_lib_snappy_symlinks = True
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_home = stack_select.get_hadoop_dir("home")
-  create_lib_snappy_symlinks = False
-  
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_hcat_server_host = not len(hcat_server_hosts) == 0
-has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-  else:
-    metric_collector_host = ams_collector_hosts[0]
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-
-#hadoop params
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-ambari_server_resources = config['hostLevelParams']['jdk_location']
-oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-
-#Added logic to create /tmp and /user directory for HCFS stack.  
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-##### Namenode RPC ports - metrics config section start #####
-
-# Figure out the rpc ports for current namenode
-nn_rpc_client_port = None
-nn_rpc_dn_port = None
-nn_rpc_healthcheck_port = None
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
- dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
- dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
- if dfs_ha_namenode_ids_array_len > 1:
-   dfs_ha_enabled = True
-
-if dfs_ha_enabled:
- for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname in nn_host:
-     namenode_id = nn_id
-     namenode_rpc = nn_host
-   pass
- pass
-else:
- namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
-
-if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
-
-if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
-else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
-
-if dfs_service_rpc_address:
- nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
-
-if dfs_lifeline_rpc_address:
- nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
-
-is_nn_client_port_configured = False if nn_rpc_client_port is None else True
-is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
-is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
-
-##### end #####
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
deleted file mode 100755
index 548f051..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-
-def create_topology_mapping():
-  import params
-
-  File(params.net_topology_mapping_data_file_path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script():
-  import params
-
-  File(params.net_topology_script_file_path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100755
index ba9c8fb..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,175 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    Directory(params.hdfs_log_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    if params.has_namenode:
-      Directory(params.hadoop_pid_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group='root',
-              cd_access='a',
-      )
-    Directory(params.hadoop_tmp_dir,
-              create_parents = True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders.
-    if params.host_sys_prepped:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=params.log4j_props
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           group=params.user_group,
-           content=Template("hadoop-metrics2.properties.j2")
-      )
-
-    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
-
-
-def setup_configs():
-  """
-  Creates configs for services HDFS mapred
-  """
-  import params
-
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-  generate_include_file()
-
-
-def generate_include_file():
-  import params
-
-  if params.has_namenode and params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         create_parents = True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-
-def create_dirs():
-   import params
-   params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777
-   )
-   params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-   )
-   params.HdfsResource(None,
-                      action="execute"
-   )
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100755
index 2197ba5..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100755
index 1adba80..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100755
index fcd9b23..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,104 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name = {{hostname}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}
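
For reference, the sink properties above are plain Jinja2 substitutions. The
sketch below renders a trimmed fragment of the metric-collector section so the
resulting properties are visible; it assumes the jinja2 package is installed,
and the host name, port, and intervals are illustrative sample values only.

from jinja2 import Template

# Trimmed copy of the metric-collector section of the template above.
fragment = """{% if has_metric_collector %}
*.period={{metrics_collection_period}}
*.sink.timeline.sendInterval={{metrics_report_interval}}000
namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
{% endif %}"""

print(Template(fragment).render(
    has_metric_collector=True,
    metrics_collection_period=10,               # sample: seconds between snapshots
    metrics_report_interval=60,                 # template appends "000" for millis
    metric_collector_protocol="http",
    metric_collector_host="ams.example.com",    # sample collector host
    metric_collector_port=6188,                 # sample collector port
))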

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
deleted file mode 100755
index 0a03d17..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
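
The check_disks function above scans /etc/fstab for ext3 mounts and flags any
that are not present in /proc/mounts ("(u)") or are mounted read-only ("(ro)").
Below is a rough Python equivalent of that logic, shown only to illustrate what
the hook tests; it assumes a Linux host with readable /etc/fstab and
/proc/mounts and is not part of the stack.

def check_disks():
    # Mount points declared as ext3 in /etc/fstab (field 3 is the fs type).
    fstab_mounts = []
    with open("/etc/fstab") as fstab:
        for line in fstab:
            fields = line.split()
            if len(fields) >= 3 and "ext3" in fields[2]:
                fstab_mounts.append(fields[1])

    # Currently mounted filesystems: mount point -> mount options.
    mounted = {}
    with open("/proc/mounts") as mounts:
        for line in mounts:
            fields = line.split()
            if len(fields) >= 4:
                mounted[fields[1]] = fields[3]

    problems = []
    for mount_point in fstab_mounts:
        if mount_point not in mounted and mount_point != "/mnt":
            problems.append(mount_point + "(u)")        # declared but not mounted
        elif mount_point in mounted and mounted[mount_point].startswith("ro,"):
            problems.append(mount_point + "(ro)")       # mounted read-only
    return "disks ok" if not problems else " ".join(problems)

if __name__ == "__main__":
    print(check_disks())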

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100755
index 4a9e713..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
deleted file mode 100755
index 15034d6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-    #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}
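
Rendering this template produces one host=rack line and one ip=rack line for
every entry of all_hosts that is also a slave host. A minimal sketch, assuming
the jinja2 package and using made-up host names, racks, and addresses:

from jinja2 import Template

topology_tpl = Template("""[network_topology]
{% for host in all_hosts %}
{% if host in slave_hosts %}
{{host}}={{all_racks[loop.index-1]}}
{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
{% endif %}
{% endfor %}""")

print(topology_tpl.render(
    all_hosts=["nn1.example.com", "dn1.example.com", "dn2.example.com"],
    slave_hosts=["dn1.example.com", "dn2.example.com"],
    all_racks=["/default-rack", "/rack-a", "/rack-b"],      # parallel to all_hosts
    all_ipv4_ips=["10.0.0.1", "10.0.0.2", "10.0.0.3"],      # parallel to all_hosts
))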

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
deleted file mode 100644
index 3aad080..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/kerberos.json
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": ""
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
-        "type" : "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username" : "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    },
-    {
-      "name": "ambari-server",
-      "principal": {
-        "value": "ambari-server-${cluster_name|toLower()}@${realm}",
-        "type" : "user",
-        "configuration": "cluster-env/ambari_principal_name"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/ambari.server.keytab"
-      }
-    }
-  ]
-
-}
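
Principal and keytab values in this descriptor use ${config-type/property}
placeholders. The sketch below is one way to expand them for inspection; it is
an assumption rather than Ambari's own resolver, and the cluster name is
supplied already lowered instead of applying the |toLower() transform.

import re

def resolve(value, variables):
    # Replace each ${...} placeholder with its value; leave unknown ones as-is.
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: str(variables.get(m.group(1), m.group(0))),
                  value)

variables = {
    "cluster-env/smokeuser": "ambari-qa",
    "cluster_name|toLower()": "c1",       # hypothetical, pre-lowered cluster name
    "realm": "EXAMPLE.COM",
}
print(resolve("${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}", variables))
# -> ambari-qa-c1@EXAMPLE.COM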

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
deleted file mode 100755
index ca45822..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>true</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
deleted file mode 100755
index 0c3e305..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    }
-  ]
-}
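
Each entry gates a feature on a stack version range. The sketch below shows the
usual interpretation of those ranges (min_version inclusive, max_version
exclusive); it is an illustration, not the Ambari implementation, and only a
couple of entries are copied in for the demo.

def version_tuple(version):
    return tuple(int(part) for part in version.split("."))

def feature_supported(features, name, stack_version):
    current = version_tuple(stack_version)
    for feature in features:
        if feature["name"] != name:
            continue
        if current < version_tuple(feature["min_version"]):
            return False
        if "max_version" in feature and current >= version_tuple(feature["max_version"]):
            return False
        return True
    return False       # unknown feature names are treated as unsupported

features = [
    {"name": "snappy", "min_version": "2.0.0.0", "max_version": "2.2.0.0"},
    {"name": "copy_tarball_to_hdfs", "min_version": "2.2.0.0"},
]
print(feature_supported(features, "snappy", "2.1.0.0"))                 # True
print(feature_supported(features, "copy_tarball_to_hdfs", "2.0.0.0"))   # False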

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
deleted file mode 100755
index c3df235..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
deleted file mode 100755
index 60eae65..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os family="redhat6">
-    <repo>
-      <baseurl>http://repo.odpi.org/ODPi/trunk/centos-6/</baseurl>
-      <repoid>ODPi-trunk</repoid>
-      <reponame>ODPi</reponame>
-    </repo>
-  </os>
-  <os family="ubuntu14">
-    <repo>
-      <baseurl>http://repo.odpi.org/ODPi/trunk/ubuntu-14.04/apt</baseurl>
-      <repoid>ODPi-trunk</repoid>
-      <reponame>odpi</reponame>
-    </repo>
-  </os>
-</reposinfo>
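
The repository descriptor is plain XML keyed by OS family. A quick
standard-library sketch that lists repoid and baseurl per family, using a
trimmed copy of the redhat6 block above:

import xml.etree.ElementTree as ET

repoinfo = """<reposinfo>
  <os family="redhat6">
    <repo>
      <baseurl>http://repo.odpi.org/ODPi/trunk/centos-6/</baseurl>
      <repoid>ODPi-trunk</repoid>
      <reponame>ODPi</reponame>
    </repo>
  </os>
</reposinfo>"""

root = ET.fromstring(repoinfo)
for os_element in root.findall("os"):
    for repo in os_element.findall("repo"):
        print(os_element.get("family"), repo.findtext("repoid"), repo.findtext("baseurl"))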

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
deleted file mode 100755
index 05beb76..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
+++ /dev/null
@@ -1,75 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
-    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
-    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
-    "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],
-    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START"],
-    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART", "NAMENODE-RESTART"],
-    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
-    "HIVE_SERVER-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART", "ZOOKEEPER_SERVER-RESTART"],
-    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
-    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP", "METRICS_COLLECTOR-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"]
-  },
-  "_comment" : "GLUSTERFS-specific dependencies",
-  "optional_glusterfs": {
-    "HBASE_MASTER-START": ["PEERSTATUS-START"],
-    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "METRICS_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START", "ZOOKEEPER_SERVER-START"],
-    "AMBARI_METRICS_SERVICE_CHECK-SERVICE_CHECK": ["METRICS_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],
-    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
-    "SECONDARY_NAMENODE-RESTART": ["NAMENODE-RESTART"],
-    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
-    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
-    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HIVE_SERVER-START": ["DATANODE-START"],
-    "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "HISTORYSERVER-RESTART": ["NAMENODE-RESTART"],
-    "RESOURCEMANAGER-RESTART": ["NAMENODE-RESTART"],
-    "NODEMANAGER-RESTART": ["NAMENODE-RESTART"],
-    "OOZIE_SERVER-RESTART": ["NAMENODE-RESTART"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
-        "SECONDARY_NAMENODE-START"],
-    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
-        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
-    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "METRICS_COLLECTOR-STOP"],
-    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
-        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "METRICS_GRAFANA-START": ["METRICS_COLLECTOR-START"],
-    "METRICS_COLLECTOR-STOP": ["METRICS_GRAFANA-STOP"]
-  },
-  "_comment" : "Dependencies that are used in HA NameNode cluster",
-  "namenode_optional_ha": {
-    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
-    "ZKFC-STOP": ["NAMENODE-STOP"],
-    "JOURNALNODE-STOP": ["NAMENODE-STOP"]
-  },
-  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
-  "resourcemanager_optional_ha" : {
-    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
-  }
-}
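
Each key in this file maps a blocked role-command to the commands that must
finish before it can run. The sketch below walks those entries transitively for
one command; it only illustrates how the map can be read (it is not how Ambari
schedules commands) and copies just a few entries from general_deps.

def transitive_blockers(deps, command):
    # Iterative walk over "blocked -> [blockers]" entries, collecting every
    # command that must complete before the requested one.
    seen, pending = set(), list(deps.get(command, []))
    while pending:
        blocker = pending.pop()
        if blocker not in seen:
            seen.add(blocker)
            pending.extend(deps.get(blocker, []))
    return seen

general_deps = {
    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
}
print(sorted(transitive_blockers(general_deps, "HBASE_REGIONSERVER-START")))
# -> ['HBASE_MASTER-START', 'ZOOKEEPER_SERVER-START']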

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
deleted file mode 100755
index d6e30b7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <version>2.7.1+odpi</version>
-      <extends>common-services/HDFS/2.1.0.2.0</extends>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
deleted file mode 100755
index e2431c3..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/alerts.json
+++ /dev/null
@@ -1,232 +0,0 @@
-{
-  "HIVE": {
-    "service": [],
-    "HIVE_METASTORE": [
-      {
-        "name": "hive_metastore_process",
-        "label": "Hive Metastore Process",
-        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
-        "interval": 3,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py",
-          "parameters": [
-            {
-              "name": "check.command.timeout",
-              "display_name": "Command Timeout",
-              "value": 60.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before check command will be killed by timeout",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "default.smoke.user",
-              "display_name": "Default Smoke User",
-              "value": "ambari-qa",
-              "type": "STRING",
-              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.principal",
-              "display_name": "Default Smoke Principal",
-              "value": "ambari-qa@EXAMPLE.COM",
-              "type": "STRING",
-              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.keytab",
-              "display_name": "Default Smoke Keytab",
-              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
-              "type": "STRING",
-              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      }
-    ],
-    "HIVE_SERVER": [
-      {
-        "name": "hive_server_process",
-        "label": "HiveServer2 Process",
-        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
-        "interval": 3,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_thrift_port.py",
-          "parameters": [
-            {
-              "name": "check.command.timeout",
-              "display_name": "Command Timeout",
-              "value": 60.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before check command will be killed by timeout",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "default.smoke.user",
-              "display_name": "Default Smoke User",
-              "value": "ambari-qa",
-              "type": "STRING",
-              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.principal",
-              "display_name": "Default Smoke Principal",
-              "value": "ambari-qa@EXAMPLE.COM",
-              "type": "STRING",
-              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.keytab",
-              "display_name": "Default Smoke Keytab",
-              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
-              "type": "STRING",
-              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      }
-    ],
-    "HIVE_SERVER_INTERACTIVE": [
-      {
-        "name": "hive_server_interactive_process",
-        "label": "HiveServer2 Interactive Process",
-        "description": "This host-level alert is triggered if the HiveServerInteractive cannot be determined to be up and responding to client requests.",
-        "interval": 3,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HIVE/0.12.0.2.0/package/alerts/alert_hive_interactive_thrift_port.py",
-          "parameters": [
-            {
-              "name": "check.command.timeout",
-              "display_name": "Command Timeout",
-              "value": 60.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before check command will be killed by timeout",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "default.smoke.user",
-              "display_name": "Default Smoke User",
-              "value": "ambari-qa",
-              "type": "STRING",
-              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.principal",
-              "display_name": "Default Smoke Principal",
-              "value": "ambari-qa@EXAMPLE.COM",
-              "type": "STRING",
-              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.smoke.keytab",
-              "display_name": "Default Smoke Keytab",
-              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
-              "type": "STRING",
-              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      },
-      {
-        "name": "llap_application",
-        "label": "LLAP Application",
-        "description": "This alert is triggered if the LLAP Application cannot be determined to be up and responding to requests.",
-        "interval": 3,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py",
-          "parameters": [
-            {
-              "name": "check.command.timeout",
-              "display_name": "Command Timeout",
-              "value": 120.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before check command will be killed by timeout",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            },
-            {
-              "name": "default.hive.user",
-              "display_name": "Default HIVE User",
-              "value": "hive",
-              "type": "STRING",
-              "description": "The user that will run the Hive commands if not specified in cluster-env",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.hive.principal",
-              "display_name": "Default HIVE Principal",
-              "value": "hive/_HOST@EXAMPLE.COM",
-              "type": "STRING",
-              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "default.hive.keytab",
-              "display_name": "Default HIVE Keytab",
-              "value": "/etc/security/keytabs/hive.llap.zk.sm.keytab",
-              "type": "STRING",
-              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env.",
-              "visibility": "HIDDEN"
-            }
-          ]
-        }
-      }
-    ],
-    "WEBHCAT_SERVER": [
-      {
-        "name": "hive_webhcat_server_status",
-        "label": "WebHCat Server Status",
-        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py",
-          "parameters": [
-            {
-              "name": "default.smoke.user",
-              "display_name": "Default Smoke User",
-              "value": "ambari-qa",
-              "type": "STRING",
-              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser",
-              "visibility": "HIDDEN"
-            },
-            {
-              "name": "connection.timeout",
-              "display_name": "Connection Timeout",
-              "value": 5.0,
-              "type": "NUMERIC",
-              "description": "The maximum time before this alert is considered to be CRITICAL",
-              "units": "seconds",
-              "threshold": "CRITICAL"
-            }
-          ]
-        }
-      }    
-    ]
-  }
-}
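
Every alert defined here is SCRIPT-based, with an interval (in minutes) and, for
most of them, a check.command.timeout parameter. The sketch below reads a
structure of this shape and summarizes those fields; it is illustrative only
and embeds a cut-down copy of the hive_metastore_process entry.

import json

def summarize(alerts_json):
    data = json.loads(alerts_json)
    for service, components in data.items():
        for component, alerts in components.items():
            if component == "service":          # service-level list, empty here
                continue
            for alert in alerts:
                timeout = next((p["value"]
                                for p in alert["source"].get("parameters", [])
                                if p["name"] == "check.command.timeout"), None)
                print("%s/%s: %s every %s min, timeout=%s"
                      % (service, component, alert["name"], alert["interval"], timeout))

sample = """{"HIVE": {"service": [], "HIVE_METASTORE": [
  {"name": "hive_metastore_process", "interval": 3,
   "source": {"type": "SCRIPT",
              "parameters": [{"name": "check.command.timeout", "value": 60.0}]}}]}}"""
summarize(sample)
# -> HIVE/HIVE_METASTORE: hive_metastore_process every 3 min, timeout=60.0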

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
deleted file mode 100755
index 3908d61..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hcat-env.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements. See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership. The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License. You may obtain a copy of the License at
-      #
-      # http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing, software
-      # distributed under the License is distributed on an "AS IS" BASIS,
-      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      # See the License for the specific language governing permissions and
-      # limitations under the License.
-
-      JAVA_HOME={{java64_home}}
-      HCAT_PID_DIR={{hcat_pid_dir}}/
-      HCAT_LOG_DIR={{hcat_log_dir}}/
-      HCAT_CONF_DIR={{hcat_conf_dir}}
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-      #DBROOT is the path where the connector jars are downloaded
-      DBROOT={{hcat_dbroot}}
-      USER={{hcat_user}}
-      METASTORE_PORT={{hive_metastore_port}}
-    </value>
-    <description>This is the jinja template for hcat-env.sh file</description>
-    <display-name>hcat-env template</display-name>
-    <filename>hcat-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
deleted file mode 100755
index e5ed319..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-env.xml
+++ /dev/null
@@ -1,540 +0,0 @@
-<configuration><property require-input="false">
-    <name>content</name>
-    <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
-
-      # The heap size of the jvm stared by hive shell script can be controlled via:
-
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
-
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
-
-
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
-
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
-
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-      if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
-
-      export METASTORE_PORT={{hive_metastore_port}}
-
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
-        </value>
-    <description>This is the jinja template for hive-env.sh file</description>
-    <display-name>hive-env template</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>content</type>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.heapsize</name>
-    <value>512</value>
-    <description>Hive Java heap size</description>
-    <display-name>HiveServer2 Heap Size</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>2048</maximum>
-        <minimum>512</minimum>
-        <unit>MB</unit>
-        <overridable>false</overridable>
-        <increment-step>512</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_exec_orc_storage_strategy</name>
-    <value>SPEED</value>
-    <description>
-      Define the encoding strategy to use while writing data. Changing this will only affect the light weight encoding for integers.
-      This flag will not change the compression level of higher level compression codec (like ZLIB). Possible options are SPEED and COMPRESSION.
-    </description>
-    <display-name>ORC Storage Strategy</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>SPEED</value>
-                <label>Speed</label>
-            </entry>
-            <entry>
-                <value>COMPRESSION</value>
-                <label>Compression</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.exec.orc.encoding.strategy</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.exec.orc.compression.strategy</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive_txn_acid</name>
-    <value>off</value>
-    <display-name>ACID Transactions</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>on</value>
-                <label>On</label>
-            </entry>
-            <entry>
-                <value>off</value>
-                <label>Off</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.enforce.bucketing</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.exec.dynamic.partition.mode</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.support.concurrency</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.txn.manager</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.compactor.initiator.on</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.compactor.worker.threads</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive_security_authorization</name>
-    <value>None</value>
-    <description>
-      Authorization mode, default NONE. Options are NONE, Ranger, SQLStdAuth.
-      SQL standard authorization provides grant/revoke functionality at database, table level. 
-      Ranger provides a centralized authorization interface for Hive and provides more granular
-      access control at column level through the Hive plugin.
-    </description>
-    <display-name>Choose Authorization</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>None</value>
-                <label>None</label>
-            </entry>
-            <entry>
-                <value>SQLStdAuth</value>
-                <label>SQLStdAuth</label>
-            </entry>
-            <entry>
-                <value>Ranger</value>
-                <label>Ranger</label>
-            </entry>
-        </entries>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>ranger-hive-plugin-enabled</name>
-            <type>ranger-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.server2.enable.doAs</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authenticator.manager</name>
-            <type>hiveserver2-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.server2.enable.doAs</name>
-            <type>hive-interactive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authorization.enabled</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.conf.restricted.list</name>
-            <type>hiveserver2-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authenticator.manager</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authorization.manager</name>
-            <type>hiveserver2-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authorization.manager</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.authorization.enabled</name>
-            <type>hiveserver2-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.security.metastore.authorization.manager</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive_timeline_logging_enabled</name>
-    <value>true</value>
-    <display-name>Use ATS Logging</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>value-list</type>
-        <entries>
-            <entry>
-                <value>true</value>
-                <label>True</label>
-            </entry>
-            <entry>
-                <value>false</value>
-                <label>False</label>
-            </entry>
-        </entries>
-        <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive.exec.pre.hooks</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.exec.post.hooks</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>hive.exec.failure.hooks</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive.client.heapsize</name>
-    <value>512</value>
-    <description>Hive Client Java heap size</description>
-    <display-name>Client Heap Size</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>2048</maximum>
-        <minimum>512</minimum>
-        <unit>MB</unit>
-        <overridable>false</overridable>
-        <increment-step>512</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive.metastore.heapsize</name>
-    <value>1024</value>
-    <description>Hive Metastore Java heap size</description>
-    <display-name>Metastore Heap Size</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>int</type>
-        <maximum>2048</maximum>
-        <minimum>512</minimum>
-        <unit>MB</unit>
-        <increment-step>512</increment-step>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_database_type</name>
-    <value>mysql</value>
-    <description>Default HIVE DB type.</description>
-    <display-name>Hive Database Type</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-        <property>
-            <name>hive_database</name>
-            <type>hive-env</type>
-        </property>
-    </depends-on>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_database</name>
-    <value>New MySQL Database</value>
-    <description>
-      Property that determines whether the HIVE DB is managed by Ambari.
-    </description>
-    <display-name>Hive Database</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>hive_database_type</name>
-            <type>hive-env</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>javax.jdo.option.ConnectionURL</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>javax.jdo.option.ConnectionDriverName</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>datanucleus.rdbms.datastoreAdapterClassName</name>
-            <type>hive-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>database</type>
-        <visible>false</visible>
-        <overridable>false</overridable>
-        <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-    <display-name>Hive Log Dir</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>directory</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-    <display-name>Hive PID Dir</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>directory</type>
-        <overridable>false</overridable>
-        <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-    <display-name>Hive User</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type>USER</property-type>
-    <value-attributes>
-        <type>user</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by>
-        <dependedByProperties>
-            <name>ranger.plugins.hive.serviceuser</name>
-            <type>ranger-admin-site</type>
-        </dependedByProperties>
-        <dependedByProperties>
-            <name>ranger.kms.service.user.hive</name>
-            <type>ranger-admin-site</type>
-        </dependedByProperties>
-    </property_depended_by>
-</property><property require-input="false">
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-    <display-name>WebHCat Log Dir</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>directory</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-    <display-name>WebHCat Pid Dir</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes>
-        <type>directory</type>
-        <overridable>false</overridable>
-        <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-    <display-name>HCat User</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type>USER</property-type>
-    <value-attributes>
-        <type>user</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-    <display-name>WebHCat User</display-name>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type>USER</property-type>
-    <value-attributes>
-        <type>user</type>
-        <overridable>false</overridable>
-    </value-attributes>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_user_nofile_limit</name>
-    <value>32000</value>
-    <description>Max open files limit setting for HIVE user.</description>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property><property require-input="false">
-    <name>hive_user_nproc_limit</name>
-    <value>16000</value>
-    <description>Max number of processes limit setting for HIVE user.</description>
-    <filename>hive-env.xml</filename>
-    <deleted>false</deleted>
-    <on-ambari-upgrade add="true" delete="false" update="false"/>
-    <property-type></property-type>
-    <value-attributes/>
-    <depends-on/>
-    <property_depended_by/>
-</property></configuration>
\ No newline at end of file


[34/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
deleted file mode 100755
index f5acb11..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn.py
+++ /dev/null
@@ -1,499 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-# Python Imports
-import os
-
-# Ambari Common and Resource Management Imports
-from resource_management.libraries.script.script import Script
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.system import File
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.core.source import InlineTemplate
-from resource_management.core.source import Template
-from resource_management.core.logger import Logger
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
-
-# Local Imports
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def yarn(name = None):
-  import params
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            mode='f',
-            configuration_attributes=params.config['configuration_attributes']['yarn-site']
-  )
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-
-  if params.service_map.has_key(name):
-    service_name = params.service_map[name]
-
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.yarn_user,
-                  password = Script.get_password(params.yarn_user))
-
-def create_log_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0775,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-  )
-  
-def create_local_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
-  )
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def yarn(name=None, config_dir=None):
-  """
-  :param name: Component name, apptimelineserver, nodemanager, resourcemanager, or None (defaults for client)
-  :param config_dir: Which config directory to write configs to, which could be different during rolling upgrade.
-  """
-  import params
-
-  if config_dir is None:
-    config_dir = params.hadoop_conf_dir
-
-  if name == "historyserver":
-    if params.yarn_log_aggregation_enabled:
-      params.HdfsResource(params.yarn_nm_app_log_dir,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0777,
-                           recursive_chmod=True
-      )
-
-    # create the /tmp folder with proper permissions if it doesn't exist yet
-    if params.entity_file_history_directory.startswith('/tmp'):
-        params.HdfsResource(params.hdfs_tmp_dir,
-                            action="create_on_execute",
-                            type="directory",
-                            owner=params.hdfs_user,
-                            mode=0777,
-        )
-
-    params.HdfsResource(params.entity_file_history_directory,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group
-    )
-    params.HdfsResource("/mapred",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user
-    )
-    params.HdfsResource("/mapred/system",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user
-    )
-    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user,
-                         group=params.user_group,
-                         change_permissions_for_parents=True,
-                         mode=0777
-    )
-    params.HdfsResource(None, action="execute")
-    Directory(params.jhs_leveldb_state_store_dir,
-              owner=params.mapred_user,
-              group=params.user_group,
-              create_parents = True,
-              cd_access="a",
-              recursive_ownership = True,
-              )
-
-  #<editor-fold desc="Node Manager Section">
-  if name == "nodemanager":
-
-    # First start after enabling/disabling security
-    if params.toggle_nm_security:
-      Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
-                action='delete'
-      )
-
-      # If yarn.nodemanager.recovery.dir exists, remove this dir
-      if params.yarn_nodemanager_recovery_dir:
-        Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-                  action='delete'
-        )
-
-      # Setting NM marker file
-      if params.security_enabled:
-        Directory(params.nm_security_marker_dir)
-        File(params.nm_security_marker,
-             content="Marker file to track first start after enabling/disabling security. "
-                     "During first start yarn local, log dirs are removed and recreated"
-             )
-      elif not params.security_enabled:
-        File(params.nm_security_marker, action="delete")
-
-
-    if not params.security_enabled or params.toggle_nm_security:
-      # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
-      nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
-      # create a history file used by handle_mounted_dirs
-      File(params.nm_log_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_log_dir_to_mount_file_content
-      )
-      nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
-      File(params.nm_local_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_local_dir_to_mount_file_content
-      )
-  #</editor-fold>
-
-  if params.yarn_nodemanager_recovery_dir:
-    Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-              owner=params.yarn_user,
-              group=params.user_group,
-              create_parents = True,
-              mode=0755,
-              cd_access = 'a',
-    )
-
-  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-
-  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
-  )
-  Directory([params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            group=params.user_group,
-            create_parents = True,
-            ignore_failures=True,
-            cd_access = 'a',
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  # During RU, Core Masters and Slaves need hdfs-site.xml
-  # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
-  # RU should rely on all available in <stack-root>/<version>/hadoop/conf
-  if 'hdfs-site' in params.config['configurations']:
-    XmlConfig("hdfs-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['hdfs-site'],
-              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=0644
-    )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  if name == 'resourcemanager':
-    Directory(params.rm_nodes_exclude_dir,
-         mode=0755,
-         create_parents=True,
-         cd_access='a',
-    )
-    File(params.rm_nodes_exclude_path,
-         owner=params.yarn_user,
-         group=params.user_group
-    )
-    File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-    )
-    if not is_empty(params.node_label_enable) and params.node_label_enable or is_empty(params.node_label_enable) and params.node_labels_dir:
-      params.HdfsResource(params.node_labels_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           change_permissions_for_parents=True,
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0700
-      )
-      params.HdfsResource(None, action="execute")
-
-
-  elif name == 'apptimelineserver':
-    Directory(params.ats_leveldb_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-    )
-
-    # if stack support application timeline-service state store property (timeline_state_store stack feature)
-    if params.stack_supports_timeline_state_store:
-      Directory(params.ats_leveldb_state_store_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-      )
-    # app timeline server 1.5 directories
-    if not is_empty(params.entity_groupfs_store_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_store_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_store_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_store_dir_mode
-                          )
-    if not is_empty(params.entity_groupfs_active_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_active_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_active_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_active_dir_mode
-                          )
-    params.HdfsResource(None, action="execute")
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(os.path.join(config_dir, "yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=InlineTemplate(params.yarn_env_sh_template)
-  )
-
-  container_executor = format("{yarn_container_bin}/container-executor")
-  File(container_executor,
-      group=params.yarn_executor_container_group,
-      mode=params.container_executor_mode
-  )
-
-  File(os.path.join(config_dir, "container-executor.cfg"),
-      group=params.user_group,
-      mode=0644,
-      content=Template('container-executor.cfg.j2')
-  )
-
-  Directory(params.cgroups_dir,
-            group=params.user_group,
-            create_parents = True,
-            mode=0755,
-            cd_access="a")
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  File(os.path.join(config_dir, "mapred-env.sh"),
-       owner=tc_owner,
-       mode=0755,
-       content=InlineTemplate(params.mapred_env_sh_template)
-  )
-
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if "ssl-client" in params.config['configurations']:
-    XmlConfig("ssl-client.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    Directory(params.hadoop_conf_secure_dir,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              cd_access='a',
-              )
-
-    XmlConfig("ssl-client.xml",
-              conf_dir=params.hadoop_conf_secure_dir,
-              configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  if "ssl-server" in params.config['configurations']:
-    XmlConfig("ssl-server.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['ssl-server'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(os.path.join(config_dir, 'fair-scheduler.xml')):
-    File(os.path.join(config_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(config_dir, 'ssl-client.xml.example')):
-    File(os.path.join(config_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if os.path.exists(
-    os.path.join(config_dir, 'ssl-server.xml.example')):
-    File(os.path.join(config_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
\ No newline at end of file
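
The NodeManager block in the yarn.py removed above leans on handle_mounted_dirs: local and log directories are created through the create_log_dir/create_local_dir callbacks only when their mount points are actually present, and each directory's last-known mount is written back to a small history file. A minimal, dependency-free sketch of that idea (a simplification, not the resource_management helper; handle_dirs and find_mount are illustrative names):

    import os

    def find_mount(path):
        # Walk up from the path until we reach a mount point (or the filesystem root).
        path = os.path.abspath(path)
        while not os.path.ismount(path) and path != os.sep:
            path = os.path.dirname(path)
        return path

    def handle_dirs(create_dir, dirs_csv, prev_history):
        # prev_history maps dir -> mount it was last created on; a dir is skipped
        # when its recorded mount no longer matches (i.e. the disk dropped out).
        lines = []
        for d in (p.strip() for p in dirs_csv.split(",") if p.strip()):
            mount = find_mount(d)
            if prev_history.get(d) in (None, mount):
                create_dir(d)
            lines.append("%s,%s" % (d, mount))
        return "\n".join(lines)  # the caller persists this as the new history file

    def create_local_dir(dir_name):
        if not os.path.isdir(dir_name):
            os.makedirs(dir_name)

    history_content = handle_dirs(create_local_dir, "/tmp/yarn/local", {})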

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
deleted file mode 100755
index 4d65a40..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from yarn import yarn
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-
-class YarnClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class YarnClientWindows(YarnClient):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  YarnClient().execute()
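
The removed yarn_client.py follows the usual Ambari layout: a base Script subclass with install/configure/status, plus per-OS-family subclasses chosen by the @OsFamilyImpl decorator at run time. A rough, self-contained sketch of that selection pattern (illustrative only; os_family_impl and resolve are made-up names, not ambari_commons APIs):

    import platform

    _IMPLS = {}

    def os_family_impl(family):
        # Register a class as the implementation for one OS family.
        def register(cls):
            _IMPLS[family] = cls
            return cls
        return register

    def resolve():
        # Pick the Windows implementation on Windows, otherwise the default one.
        family = "winsrv" if platform.system() == "Windows" else "default"
        return _IMPLS.get(family, _IMPLS["default"])

    class YarnClientBase(object):
        def configure(self):
            print("writing YARN client configs")

    @os_family_impl("winsrv")
    class YarnClientWindows(YarnClientBase):
        pass

    @os_family_impl("default")
    class YarnClientDefault(YarnClientBase):
        pass

    resolve()().configure()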

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
deleted file mode 100755
index c6f1ff6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users=hdfs,yarn,mapred,bin
-min.user.id={{min_user_id}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
deleted file mode 100755
index c7ce416..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
deleted file mode 100755
index ae8e6d5..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/mapreduce.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{mapred_user}}   - nofile {{mapred_user_nofile_limit}}
-{{mapred_user}}   - nproc  {{mapred_user_nproc_limit}}
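
mapreduce.conf.j2 (and yarn.conf.j2 further down) render into /etc/security/limits.d style entries for the MapReduce and YARN users. To preview the output outside Ambari, a quick sketch using the jinja2 package (assuming it is installed; the user name and limit values below are placeholders):

    from jinja2 import Template

    tmpl = Template(
        "{{mapred_user}}   - nofile {{mapred_user_nofile_limit}}\n"
        "{{mapred_user}}   - nproc  {{mapred_user_nproc_limit}}\n")

    print(tmpl.render(mapred_user="mapred",
                      mapred_user_nofile_limit=32768,
                      mapred_user_nproc_limit=65536))
    # mapred   - nofile 32768
    # mapred   - nproc  65536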

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
deleted file mode 100755
index 3d5f4f2..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
deleted file mode 100755
index 1063099..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/templates/yarn.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{yarn_user}}   - nofile {{yarn_user_nofile_limit}}
-{{yarn_user}}   - nproc  {{yarn_user_nproc_limit}}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100755
index 0a89dc2..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <version>3.4.6+odpi</version>
-      <extends>common-services/ZOOKEEPER/3.4.5</extends>
-    </service>
-  </services>
-</metainfo>
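
This metainfo.xml is a thin stack definition: it pins the ODPi ZooKeeper version and inherits everything else from the common-services definition it extends. A small standard-library sketch for inspecting such files (the file path is illustrative):

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")  # e.g. the ZOOKEEPER metainfo shown above
    for service in tree.getroot().findall("./services/service"):
        print("%s %s extends %s" % (service.findtext("name"),
                                    service.findtext("version"),
                                    service.findtext("extends")))
    # ZOOKEEPER 3.4.6+odpi extends common-services/ZOOKEEPER/3.4.5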


[14/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
new file mode 100755
index 0000000..4f53ea9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import glob
+from urlparse import urlparse
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_sudo
+from resource_management.core.shell import quote_bash_args
+from resource_management.core.logger import Logger
+from resource_management.core import utils
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons.constants import SERVICE
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive(name=None):
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+  if name in ["hiveserver2","metastore"]:
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hive_user,
+                  password = Script.get_password(params.hive_user))
+    Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
+
+  if name == 'metastore':
+    if params.init_metastore_schema:
+      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
+                                        '-dbType {hive_metastore_db_type} '
+                                        '-userName {hive_metastore_user_name} '
+                                        '-passWord {hive_metastore_user_passwd!p}'
+                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
+                                        hive_bin=params.hive_bin,
+                                        hive_metastore_db_type=params.hive_metastore_db_type,
+                                        hive_metastore_user_name=params.hive_metastore_user_name,
+                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+      try:
+        Execute(check_schema_created_cmd)
+      except Fail:
+        create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
+                                   '-dbType {hive_metastore_db_type} '
+                                   '-userName {hive_metastore_user_name} '
+                                   '-passWord {hive_metastore_user_passwd!p}',
+                                   hive_bin=params.hive_bin,
+                                   hive_metastore_db_type=params.hive_metastore_db_type,
+                                   hive_metastore_user_name=params.hive_metastore_user_name,
+                                   hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+        Execute(create_schema_cmd,
+                user = params.hive_user,
+                logoutput=True
+        )
+
+  if name == "hiveserver2":
+    if params.hive_execution_engine == "tez":
+      # Init the tez app dir in hadoop
+      script_file = __file__.replace('/', os.sep)
+      cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
+
+      Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive(name=None):
+  import params
+
+  if name == 'hiveserver2':
+    # copy tarball to HDFS feature not supported
+    if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):  
+      params.HdfsResource(params.webhcat_apps_dir,
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.webhcat_user,
+                            mode=0755
+                          )
+    
+    # Create webhcat dirs.
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      params.HdfsResource(params.hcat_hdfs_user_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hcat_user,
+                           mode=params.hcat_hdfs_user_mode
+      )
+
+    params.HdfsResource(params.webhcat_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.webhcat_user,
+                         mode=params.webhcat_hdfs_user_mode
+    )
+
+    # ****** Begin Copy Tarballs ******
+    # *********************************
+    # if the copy-tarball-to-HDFS feature is supported, copy mapreduce.tar.gz and tez.tar.gz to HDFS
+    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+
+    # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
+    # This can use a different source and dest location to account for the custom source/dest files passed below.
+    copy_to_hdfs("pig",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 custom_source_file=params.pig_tar_source,
+                 custom_dest_file=params.pig_tar_dest_file,
+                 host_sys_prepped=params.host_sys_prepped)
+    copy_to_hdfs("hive",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 custom_source_file=params.hive_tar_source,
+                 custom_dest_file=params.hive_tar_dest_file,
+                 host_sys_prepped=params.host_sys_prepped)
+
+    wildcard_tarballs = ["sqoop", "hadoop_streaming"]
+    for tarball_name in wildcard_tarballs:
+      source_file_pattern = eval("params." + tarball_name + "_tar_source")
+      dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
+
+      if source_file_pattern is None or dest_dir is None:
+        continue
+
+      source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
+      for source_file in source_files:
+        src_filename = os.path.basename(source_file)
+        dest_file = os.path.join(dest_dir, src_filename)
+
+        copy_to_hdfs(tarball_name,
+                     params.user_group,
+                     params.hdfs_user,
+                     file_mode=params.tarballs_mode,
+                     custom_source_file=source_file,
+                     custom_dest_file=dest_file,
+                     host_sys_prepped=params.host_sys_prepped)
+    # ******* End Copy Tarballs *******
+    # *********************************
+    
+    # if warehouse directory is in DFS
+    if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
+      # Create Hive Metastore Warehouse Dir
+      params.HdfsResource(params.hive_apps_whs_dir,
+                           type="directory",
+                            action="create_on_execute",
+                            owner=params.hive_user,
+                            mode=0777
+      )
+    else:
+      Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
+
+    # Create Hive User Dir
+    params.HdfsResource(params.hive_hdfs_user_dir,
+                         type="directory",
+                          action="create_on_execute",
+                          owner=params.hive_user,
+                          mode=params.hive_hdfs_user_mode
+    )
+    
+    if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
+      params.HdfsResource(params.hive_exec_scratchdir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hive_user,
+                           group=params.hdfs_user,
+                           mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
+      
+    params.HdfsResource(None, action="execute")
+
+  Directory(params.hive_etc_dir_prefix,
+            mode=0755
+  )
+
+  # We should change configurations for client as well as for server.
+  # The reason is that stale-configs are service-level, not component-level.
+  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.hive_site_config,
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  # Generate atlas-application.properties.xml file
+  if has_atlas_in_cluster():
+    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
+  
+  if name == 'hiveserver2':
+    XmlConfig("hiveserver2-site.xml",
+              conf_dir=params.hive_server_conf_dir,
+              configurations=params.config['configurations']['hiveserver2-site'],
+              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0644)
+
+  if params.hive_metastore_site_supported and name == 'metastore':
+    XmlConfig("hivemetastore-site.xml",
+              conf_dir=params.hive_server_conf_dir,
+              configurations=params.config['configurations']['hivemetastore-site'],
+              configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0644)
+  
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
+  # On some OSes this folder may not exist, so create it before pushing files into it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+
+  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hive.conf.j2")
+       )
+
+  if name == 'metastore' or name == 'hiveserver2':
+    if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
+      jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
+    if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
+      jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
+
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+       mode = 0644,
+  )
+
+  if name == 'metastore':
+    File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
+         owner=params.hive_user,
+         group=params.user_group,
+         content=Template("hadoop-metrics2-hivemetastore.properties.j2")
+    )
+
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+    if params.init_metastore_schema:
+      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                 "{hive_schematool_bin}/schematool -initSchema "
+                                 "-dbType {hive_metastore_db_type} "
+                                 "-userName {hive_metastore_user_name} "
+                                 "-passWord {hive_metastore_user_passwd!p} -verbose")
+
+      check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                        "{hive_schematool_bin}/schematool -info "
+                                        "-dbType {hive_metastore_db_type} "
+                                        "-userName {hive_metastore_user_name} "
+                                        "-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
+
+      # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
+      # Fixing it with the hack below:
+      quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
+      if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
+          or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
+        quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
+      Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
+          format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
+
+      Execute(create_schema_cmd,
+              not_if = check_schema_created_cmd,
+              user = params.hive_user
+      )
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=Template(format('{start_hiveserver2_script}'))
+    )
+
+    File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
+         owner=params.hive_user,
+         group=params.user_group,
+         content=Template("hadoop-metrics2-hiveserver2.properties.j2")
+    )
+
+  if name != "client":
+    Directory(params.hive_pid_dir,
+              create_parents = True,
+              cd_access='a',
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0755)
+    Directory(params.hive_log_dir,
+              create_parents = True,
+              cd_access='a',
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0755)
+    Directory(params.hive_var_lib,
+              create_parents = True,
+              cd_access='a',
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0755)
+
+"""
+Writes configuration files required by Hive.
+"""
+def fill_conf_dir(component_conf_dir):
+  import params
+
+  Directory(component_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            create_parents = True
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=component_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+
+  File(format("{component_conf_dir}/hive-default.xml.template"),
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+  File(format("{component_conf_dir}/hive-env.sh.template"),
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+  # Create hive-log4j.properties and hive-exec-log4j.properties
+  # in /etc/hive/conf and not in /etc/hive2/conf
+  if params.log4j_version == '1':
+    log4j_exec_filename = 'hive-exec-log4j.properties'
+    if (params.log4j_exec_props != None):
+      File(format("{component_conf_dir}/{log4j_exec_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=params.log4j_exec_props
+      )
+    elif (os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template"))):
+      File(format("{component_conf_dir}/{log4j_exec_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
+      )
+
+    log4j_filename = 'hive-log4j.properties'
+    if (params.log4j_props != None):
+      File(format("{component_conf_dir}/{log4j_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=params.log4j_props
+      )
+    elif (os.path.exists(format("{component_conf_dir}/{log4j_filename}.template"))):
+      File(format("{component_conf_dir}/{log4j_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
+      )
+    pass # if params.log4j_version == '1'
+
+
+def jdbc_connector(target, hive_previous_jdbc_jar):
+  """
+  Shared by Hive Batch, Hive Metastore, and Hive Interactive
+  :param target: Target of jdbc jar name, which could be for any of the components above.
+  """
+  import params
+
+  if not params.jdbc_jar_name:
+    return
+
+  if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+
+    if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
+      File(hive_previous_jdbc_jar, action='delete')
+
+    # TODO: remove once ranger_hive_plugin no longer provides the jdbc jar
+    if params.prepackaged_jdbc_name != params.jdbc_jar_name:
+      Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
+              path=["/bin", "/usr/bin/"],
+              sudo = True)
+    
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source))
+
+    # it may be more correct to key this off the database type
+    if params.sqla_db_used:
+      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
+
+      Execute(untar_sqla_type2_driver, sudo = True)
+
+      Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
+
+      Directory(params.jdbc_libs_dir,
+                create_parents = True)
+
+      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
+
+      Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
+
+    else:
+      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
+            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the JDBC driver
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+  else:
+    #for default hive db (Mysql)
+    Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
+            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the JDBC driver
+            path=["/bin", "/usr/bin/"],
+            sudo=True
+    )
+  pass
+
+  File(target,
+       mode = 0644,
+  )
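The component scripts later in this patch call this helper lazily, only when the target jar is not already in place. A minimal sketch of that guard, reusing names this patch already defines (the wrapper function itself is illustrative and not part of the patch):

    import os
    from hive import jdbc_connector

    def ensure_jdbc_driver(target_jar, previous_jar):
      # jdbc_connector() removes the stale previous jar and copies or downloads the
      # configured connector into place; skip the work when the target already exists.
      if not os.path.exists(target_jar):
        jdbc_connector(target_jar, previous_jar)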

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
new file mode 100755
index 0000000..3d9bfd7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from hive import hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HiveClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='client')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveClientWindows(HiveClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveClientDefault(HiveClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Hive client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hive", params.version)
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HiveClient().execute()
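As context for the `execute()` entry point: `Script.execute()` in resource_management reads the lifecycle command the Ambari agent passes on the command line and dispatches to the method of the same name, so an INSTALL command ends up in `install()` and a CONFIGURE command in `configure()`, which here delegates to `hive(name='client')`. A much-simplified sketch of that dispatch, for illustration only (the real implementation also loads the command JSON, configures logging, and handles retries):

    import sys

    class MiniScript(object):
      def execute(self):
        # Pick the lifecycle method named by the first command-line argument,
        # e.g. 'INSTALL' -> install(), 'CONFIGURE' -> configure().
        command = sys.argv[1].lower()
        method = getattr(self, command, None)
        if method is None:
          raise RuntimeError("No method matching command: %s" % command)
        method(None)  # the real Script passes an Environment object here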

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
new file mode 100755
index 0000000..74c67fc
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+# Python Imports
+import os
+import glob
+from urlparse import urlparse
+
+# Resource Management and Common Imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_sudo
+from resource_management.core.shell import quote_bash_args
+from resource_management.core.logger import Logger
+from resource_management.core import utils
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from hive import fill_conf_dir, jdbc_connector
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_interactive(name=None):
+  pass
+
+"""
+Sets up the configs, jdbc connection and tarball copy to HDFS for Hive Server Interactive.
+"""
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_interactive(name=None):
+  import params
+
+  # list of properties that should be excluded from the config
+  # this approach is a compromise against adding a dedicated config
+  # type for hive_server_interactive or needed config groups on a
+  # per component basis
+  exclude_list = ['hive.enforce.bucketing',
+                  'hive.enforce.sorting']
+
+  # List of configs to be excluded from hive2 client, but present in Hive2 server.
+  exclude_list_for_hive2_client = ['javax.jdo.option.ConnectionPassword']
+
+  # Copy Tarballs in HDFS.
+  if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
+    resource_created = copy_to_hdfs("tez_hive2",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 host_sys_prepped=params.host_sys_prepped)
+
+    if resource_created:
+      params.HdfsResource(None, action="execute")
+
+  Directory(params.hive_interactive_etc_dir_prefix,
+            mode=0755
+            )
+
+  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  '''
+  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
+  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
+  '''
+  merged_hive_interactive_site = {}
+  merged_hive_interactive_site.update(params.config['configurations']['hive-site'])
+  merged_hive_interactive_site.update(params.config['configurations']['hive-interactive-site'])
+  for item in exclude_list:
+    if item in merged_hive_interactive_site:
+      del merged_hive_interactive_site[item]
+
+  '''
+  Hive2 doesn't support Atlas, so we need to remove the hook 'org.apache.atlas.hive.hook.HiveHook',
+  which would have come in via the config 'hive.exec.post.hooks' during the site merge logic above, if Atlas is installed.
+  '''
+  remove_atlas_hook_if_exists(merged_hive_interactive_site)
+
+  '''
+  As tez_hive2/tez-site.xml only contains the new + the changed props compared to tez/tez-site.xml,
+  we need to merge tez/tez-site.xml and tez_hive2/tez-site.xml and store it in tez_hive2/tez-site.xml.
+  '''
+  merged_tez_interactive_site = {}
+  if 'tez-site' in params.config['configurations']:
+    merged_tez_interactive_site.update(params.config['configurations']['tez-site'])
+    Logger.info("Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'.")
+  else:
+    Logger.error("Tez's 'tez-site' couldn't be retrieved from passed-in configurations.")
+
+  merged_tez_interactive_site.update(params.config['configurations']['tez-interactive-site'])
+  XmlConfig("tez-site.xml",
+            conf_dir = params.tez_interactive_config_dir,
+            configurations = merged_tez_interactive_site,
+            configuration_attributes=params.config['configuration_attributes']['tez-interactive-site'],
+            owner = params.tez_interactive_user,
+            group = params.user_group,
+            mode = 0664)
+
+  '''
+  Merge properties from hiveserver2-interactive-site into hiveserver2-site
+  '''
+  merged_hiveserver2_interactive_site = {}
+  if 'hiveserver2-site' in params.config['configurations']:
+    merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-site'])
+    Logger.info("Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'.")
+  else:
+    Logger.error("'hiveserver2-site' couldn't be retrieved from passed-in configurations.")
+  merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-interactive-site'])
+
+
+  # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
+  #   hive-site.xml
+  #   hive-env.sh
+  #   llap-daemon-log4j2.properties
+  #   llap-cli-log4j2.properties
+  #   hive-log4j2.properties
+  #   hive-exec-log4j2.properties
+  #   beeline-log4j2.properties
+
+  hive2_conf_dirs_list = params.hive_conf_dirs_list
+  hive2_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
+
+  # Making copy of 'merged_hive_interactive_site' in 'merged_hive_interactive_site_copy', and deleting 'javax.jdo.option.ConnectionPassword'
+  # config from there, as Hive2 client shouldn't have that config.
+  merged_hive_interactive_site_copy = merged_hive_interactive_site.copy()
+  for item in exclude_list_for_hive2_client:
+    if item in merged_hive_interactive_site:
+      del merged_hive_interactive_site_copy[item]
+
+  for conf_dir in hive2_conf_dirs_list:
+      if conf_dir == hive2_client_conf_path:
+        XmlConfig("hive-site.xml",
+                  conf_dir=conf_dir,
+                  configurations=merged_hive_interactive_site_copy,
+                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
+                  owner=params.hive_user,
+                  group=params.user_group,
+                  mode=0644)
+      else:
+        XmlConfig("hive-site.xml",
+                  conf_dir=conf_dir,
+                  configurations=merged_hive_interactive_site,
+                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
+                  owner=params.hive_user,
+                  group=params.user_group,
+                  mode=0644)
+
+      XmlConfig("hiveserver2-site.xml",
+                conf_dir=conf_dir,
+                configurations=merged_hiveserver2_interactive_site,
+                configuration_attributes=params.config['configuration_attributes']['hiveserver2-interactive-site'],
+                owner=params.hive_user,
+                group=params.user_group,
+                mode=0644)
+
+      hive_server_interactive_conf_dir = conf_dir
+
+      File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
+           owner=params.hive_user,
+           group=params.user_group,
+           content=InlineTemplate(params.hive_interactive_env_sh_template))
+
+      llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=params.llap_daemon_log4j)
+
+      llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.hive_user,
+           content=params.llap_cli_log4j2)
+
+      hive_log4j2_filename = 'hive-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.hive_log4j2)
+
+      hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.hive_exec_log4j2)
+
+      beeline_log4j2_filename = 'beeline-log4j2.properties'
+      File(format("{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.beeline_log4j2)
+
+      File(os.path.join(hive_server_interactive_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           content=Template("hadoop-metrics2-hiveserver2.properties.j2")
+           )
+
+      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           content=Template("hadoop-metrics2-llapdaemon.j2"))
+
+      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"),
+           owner=params.hive_user,
+           group=params.user_group,
+           content=Template("hadoop-metrics2-llaptaskscheduler.j2"))
+
+
+  # On some operating systems this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root')
+
+  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hive.conf.j2"))
+
+  if not os.path.exists(params.target_hive_interactive):
+    jdbc_connector(params.target_hive_interactive, params.hive_intaractive_previous_jdbc_jar)
+
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+       mode = 0644)
+  File(params.start_hiveserver2_interactive_path,
+       mode=0755,
+       content=Template(format('{start_hiveserver2_interactive_script}')))
+
+  Directory(params.hive_pid_dir,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_log_dir,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+  Directory(params.hive_interactive_var_lib,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+"""
+Removes the 'org.apache.atlas.hive.hook.HiveHook' value from the Hive2/hive-site.xml config 'hive.exec.post.hooks', if it exists.
+"""
+def remove_atlas_hook_if_exists(merged_hive_interactive_site):
+  if 'hive.exec.post.hooks' in merged_hive_interactive_site:
+    existing_hive_exec_post_hooks = merged_hive_interactive_site.get('hive.exec.post.hooks')
+    if existing_hive_exec_post_hooks:
+      hook_splits = existing_hive_exec_post_hooks.split(",")
+      updated_hook_splits = [hook for hook in hook_splits if not hook.strip() == 'org.apache.atlas.hive.hook.HiveHook']
+      updated_hooks_str = ",".join((str(hook)).strip() for hook in updated_hook_splits)
+      if updated_hooks_str != existing_hive_exec_post_hooks:
+        merged_hive_interactive_site['hive.exec.post.hooks'] = updated_hooks_str
+        Logger.info("Updated Hive2/hive-site.xml 'hive.exec.post.hooks' value from : '{0}' to : '{1}'"
+                    .format(existing_hive_exec_post_hooks, updated_hooks_str))
+      else:
+        Logger.info("No change done to Hive2/hive-site.xml 'hive.exec.post.hooks' value.")
+  else:
+    Logger.debug("'hive.exec.post.hooks' doesn't exist in Hive2/hive-site.xml")
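A quick standalone sketch of the same hook-filtering logic, slightly simplified and independent of the resource_management Logger (the property value shown is hypothetical):

    ATLAS_HOOK = 'org.apache.atlas.hive.hook.HiveHook'

    def strip_atlas_hook(post_hooks_value):
      # Split the comma-separated hook list, drop the Atlas hook, and re-join.
      hooks = [hook.strip() for hook in post_hooks_value.split(',')]
      return ','.join(hook for hook in hooks if hook and hook != ATLAS_HOOK)

    print(strip_atlas_hook('org.apache.hadoop.hive.ql.hooks.ATSHook,' + ATLAS_HOOK))
    # -> org.apache.hadoop.hive.ql.hooks.ATSHook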

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
new file mode 100755
index 0000000..17bf581
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, Directory
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.core.resources.system import File
+
+from hive import hive
+from hive import jdbc_connector
+from hive_service import hive_service
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+# the legacy conf.server location in previous stack versions
+LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
+
+class HiveMetastore(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # write configurations out on start; required for security
+    self.configure(env)
+
+    hive_service('metastore', action='start', upgrade_type=upgrade_type)
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hive_service('metastore', action='stop', upgrade_type=upgrade_type)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name = 'metastore')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveMetastoreWindows(HiveMetastore):
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_windows_service_status
+    check_windows_service_status(status_params.hive_metastore_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveMetastoreDefault(HiveMetastore):
+  def get_component_name(self):
+    return "hive-metastore"
+
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_process_status
+
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify that the Hive Metastore process referenced by the pid file is running
+    check_process_status(pid_file)
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Metastore Stack Upgrade pre-restart")
+    import params
+
+    env.set_params(params)
+
+    is_upgrade = params.upgrade_direction == Direction.UPGRADE
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hive", params.version)
+      stack_select.select("hive-metastore", params.version)
+
+    if is_upgrade and params.stack_version_formatted_major and \
+            check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
+      self.upgrade_schema(env)
+
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.metastore.kerberos.keytab.file",
+                           "hive.metastore.kerberos.principal"]
+
+      props_read_check = ["hive.metastore.kerberos.keytab.file"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
+            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
+                                security_params['hive-site']['hive.metastore.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def upgrade_schema(self, env):
+    """
+    Executes the schema upgrade binary.  This is its own function because it could
+    be called as a standalone task from the upgrade pack, but it is safe to run for each
+    metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.
+
+    The metastore schema upgrade requires a database driver library for most
+    databases. During an upgrade, it's possible that the library is not present,
+    so this will also attempt to copy/download the appropriate driver.
+
+    This function will also ensure that configurations are written out to disk before running
+    since the new configs will most likely not yet exist on an upgrade.
+
+    Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
+    """
+    Logger.info("Upgrading Hive Metastore Schema")
+    import status_params
+    import params
+    env.set_params(params)
+
+    # ensure that configurations are written out before trying to upgrade the schema
+    # since the schematool needs configs and doesn't know how to use the hive conf override
+    self.configure(env)
+
+    if params.security_enabled:
+      cached_kinit_executor(status_params.kinit_path_local,
+        status_params.hive_user,
+        params.hive_metastore_keytab_path,
+        params.hive_metastore_principal,
+        status_params.hostname,
+        status_params.tmp_dir)
+      
+    # ensure that the JDBC driver is present for the schema tool; if it's not
+    # present, then download it first
+    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
+      target_directory = format("{stack_root}/{version}/hive/lib")
+
+      # download it if it does not exist
+      if not os.path.exists(params.source_jdbc_file):
+        jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
+
+      target_directory_and_filename = os.path.join(target_directory, os.path.basename(params.source_jdbc_file))
+
+      if params.sqla_db_used:
+        target_native_libs_directory = format("{target_directory}/native/lib64")
+
+        Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))
+
+        Directory(target_native_libs_directory, create_parents = True)
+
+        Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))
+
+        Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
+      else:
+        # copy the JDBC driver from the older metastore location to the new location only
+        # if it does not already exist
+        if not os.path.exists(target_directory_and_filename):
+          Execute(('cp', params.source_jdbc_file, target_directory),
+            path=["/bin", "/usr/bin/"], sudo = True)
+
+      File(target_directory_and_filename, mode = 0644)
+
+    # build the schema tool command
+    binary = format("{hive_schematool_ver_bin}/schematool")
+
+    # the conf.server directory changed locations between stack versions
+    # since the configurations have not been written out yet during an upgrade
+    # we need to choose the original legacy location
+    schematool_hive_server_conf_dir = params.hive_server_conf_dir
+    if params.current_version is not None:
+      current_version = format_stack_version(params.current_version)
+      if not(check_stack_feature(StackFeature.CONFIG_VERSIONING, current_version)):
+        schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
+
+    env_dict = {
+      'HIVE_CONF_DIR': schematool_hive_server_conf_dir
+    }
+
+    command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
+    Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
+    
+  def get_log_folder(self):
+    import params
+    return params.hive_log_dir
+
+  def get_user(self):
+    import params
+    return params.hive_user
+
+
+if __name__ == "__main__":
+  HiveMetastore().execute()
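To make the schema-upgrade step above concrete, here is a rough resolution of the pieces that `upgrade_schema()` assembles, with hypothetical values (a MySQL metastore and the legacy conf.server location); none of these literal paths come from params.py:

    # Hypothetical resolution of the command built in upgrade_schema():
    binary = "/usr/lib/hive/bin/schematool"                       # format("{hive_schematool_ver_bin}/schematool")
    env_dict = {'HIVE_CONF_DIR': '/etc/hive/conf.server'}         # LEGACY_HIVE_SERVER_CONF on older stacks
    command = "%s -dbType %s -upgradeSchema" % (binary, "mysql")  # {hive_metastore_db_type} assumed to be 'mysql'
    # Execute(command, user='hive', environment=env_dict) then runs the schema tool,
    # which is a no-op if the metastore schema is already current.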

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
new file mode 100755
index 0000000..31b083b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from ambari_commons import OSCheck, OSConst
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+from setup_ranger_hive import setup_ranger_hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+
+import hive_server_upgrade
+from hive import hive
+from hive_service import hive_service
+
+
+class HiveServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='hiveserver2')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveServerWindows(HiveServer):
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service('hiveserver2', action='start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    hive_service('hiveserver2', action='stop')
+
+  def status(self, env):
+    import status_params
+    check_windows_service_status(status_params.hive_server_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerDefault(HiveServer):
+  def get_component_name(self):
+    return "hive-server2"
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
+    setup_ranger_hive(upgrade_type=upgrade_type)
+    hive_service('hiveserver2', action = 'start', upgrade_type=upgrade_type)
+
+    # only perform this if upgrading and rolling; a non-rolling upgrade doesn't need
+    # to do this since hive is already down
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      hive_server_upgrade.post_upgrade_deregister()
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # During rolling upgrade, HiveServer2 should not be stopped before new server is available.
+    # Once new server is started, old one is stopped by the --deregister command which is 
+    # invoked by the 'hive_server_upgrade.post_upgrade_deregister()' method
+    if upgrade_type != UPGRADE_TYPE_ROLLING:
+      hive_service( 'hiveserver2', action = 'stop' )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+
+    # Verify that the HiveServer2 process referenced by the pid file is running
+    check_process_status(pid_file)
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Hive Server Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hive", params.version)
+      stack_select.select("hive-server2", params.version)
+
+      # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
+      resource_created = copy_to_hdfs(
+        "mapreduce",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped)
+
+      resource_created = copy_to_hdfs(
+        "tez",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped) or resource_created
+
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
+                           "hive.server2.authentication.kerberos.principal",
+                           "hive.server2.authentication.spnego.principal",
+                           "hive.server2.authentication.spnego.keytab"]
+
+      props_read_check = ["hive.server2.authentication.kerberos.keytab",
+                          "hive.server2.authentication.spnego.keytab"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
+            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.hive_log_dir
+  
+  def get_user(self):
+    import params
+    return params.hive_user
+
+if __name__ == "__main__":
+  HiveServer().execute()
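A note on the `status()` implementation above: `check_process_status()` reads the pid file and raises `ComponentIsNotRunning` when the process is gone, which is how Ambari marks the component as down. A pure-Python sketch of that kind of liveness check, for illustration only (the real helper handles stale and unreadable pid files more carefully):

    import os

    def is_process_running(pid_file):
      # Read the pid and probe it with signal 0; this checks existence without killing it.
      try:
        with open(pid_file) as f:
          pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
      except (IOError, ValueError, OSError):
        return False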

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
new file mode 100755
index 0000000..2df001c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
@@ -0,0 +1,535 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import subprocess
+import os
+import re
+import time
+import shutil
+from datetime import datetime
+import json
+
+# Ambari Commons & Resource Management imports
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.source import InlineTemplate
+from resource_management.core.resources.system import Execute
+
+# Imports needed for Rolling/Express Upgrade
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+
+from resource_management.core import shell
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+# Local Imports
+from setup_ranger_hive import setup_ranger_hive
+from hive_service_interactive import hive_service_interactive
+from hive_interactive import hive_interactive
+from hive_server import HiveServerDefault
+from setup_ranger_hive_interactive import setup_ranger_hive_interactive
+
+import traceback
+
+class HiveServerInteractive(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerInteractiveDefault(HiveServerInteractive):
+
+    def get_component_name(self):
+      return "hive-server2-hive2"
+
+    def install(self, env):
+      import params
+      self.install_packages(env)
+
+    def configure(self, env):
+      import params
+      env.set_params(params)
+      hive_interactive(name='hiveserver2')
+
+    def pre_upgrade_restart(self, env, upgrade_type=None):
+      Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
+      import params
+      env.set_params(params)
+
+      if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+        stack_select.select("hive-server2-hive2", params.version)
+        conf_select.select(params.stack_name, "hive2", params.version)
+
+        # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
+        resource_created = copy_to_hdfs(
+          "hive2",
+          params.user_group,
+          params.hdfs_user,
+          host_sys_prepped=params.host_sys_prepped)
+
+        resource_created = copy_to_hdfs(
+          "tez_hive2",
+          params.user_group,
+          params.hdfs_user,
+          host_sys_prepped=params.host_sys_prepped) or resource_created
+
+        if resource_created:
+          params.HdfsResource(None, action="execute")
+
+    def start(self, env, upgrade_type=None):
+      import params
+      env.set_params(params)
+      self.configure(env)
+
+      if params.security_enabled:
+        # Do the security setup, internally calls do_kinit()
+        self.setup_security()
+
+      # TODO : We need have conditional [re]start of LLAP once "status check command" for LLAP is ready.
+      # Check status and based on that decide on [re]starting.
+
+      # Start LLAP before Hive Server Interactive start.
+      status = self._llap_start(env)
+      if not status:
+        raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.")
+
+      # TODO : test the workability of Ranger and Hive2 during upgrade
+      setup_ranger_hive_interactive(upgrade_type=upgrade_type)
+      hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
+
+
+    def stop(self, env, upgrade_type=None):
+      import params
+      env.set_params(params)
+
+      if params.security_enabled:
+        self.do_kinit()
+
+      # Stop Hive Interactive Server first
+      hive_service_interactive('hiveserver2', action='stop')
+
+      self._llap_stop(env)
+
+    def status(self, env):
+      import status_params
+      env.set_params(status_params)
+
+      # We do not run the 'llap' status check here as part of the 'HSI' status check, since the 'llap'
+      # status check is a heavyweight operation.
+
+      pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
+      # Verify that the Hive Server Interactive process referenced by the pid file is running
+      check_process_status(pid_file)
+
+    def security_status(self, env):
+      import status_params
+      env.set_params(status_params)
+
+      if status_params.security_enabled:
+        props_value_check = {"hive.server2.authentication": "KERBEROS",
+                             "hive.metastore.sasl.enabled": "true",
+                             "hive.security.authorization.enabled": "true"}
+        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
+                             "hive.server2.authentication.kerberos.principal",
+                             "hive.server2.authentication.spnego.principal",
+                             "hive.server2.authentication.spnego.keytab"]
+
+        props_read_check = ["hive.server2.authentication.kerberos.keytab",
+                            "hive.server2.authentication.spnego.keytab"]
+        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                             props_read_check)
+
+        hive_expectations ={}
+        hive_expectations.update(hive_site_props)
+
+        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
+                                                     {'hive-site.xml': FILE_TYPE_XML})
+        result_issues = validate_security_config_properties(security_params, hive_expectations)
+        if not result_issues: # If all validations passed successfully
+          try:
+            # Double check the dict before calling execute
+            if 'hive-site' not in security_params \
+              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
+              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
+              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
+              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
+              self.put_structured_out({"securityState": "UNSECURED"})
+              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+              return
+
+            cached_kinit_executor(status_params.kinit_path_local,
+                                  status_params.hive_user,
+                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
+                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
+                                  status_params.hostname,
+                                  status_params.tmp_dir)
+            cached_kinit_executor(status_params.kinit_path_local,
+                                  status_params.hive_user,
+                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
+                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
+                                  status_params.hostname,
+                                  status_params.tmp_dir)
+            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+          except Exception as e:
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityStateErrorInfo": str(e)})
+        else:
+          issues = []
+          for cf in result_issues:
+            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      else:
+        self.put_structured_out({"securityState": "UNSECURED"})
+
+    def restart_llap(self, env):
+      """
+      Custom command to Restart LLAP
+      """
+      Logger.info("Custom Command to retart LLAP")
+      import params
+      env.set_params(params)
+
+      if params.security_enabled:
+        self.do_kinit()
+
+      self._llap_stop(env)
+      self._llap_start(env)
+
+    def _llap_stop(self, env):
+      import params
+      Logger.info("Stopping LLAP")
+      SLIDER_APP_NAME = "llap0"
+
+      stop_cmd = ["slider", "stop", SLIDER_APP_NAME]
+
+      code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
+      if code == 0:
+        Logger.info(format("Stopped {SLIDER_APP_NAME} application on Slider successfully"))
+      elif code == 69 and output is not None and "Unknown application instance" in output:
+        Logger.info(format("Application {SLIDER_APP_NAME} was already stopped on Slider"))
+      else:
+        raise Fail(format("Could not stop application {SLIDER_APP_NAME} on Slider. {error}\n{output}"))
+
+      # Exits with code 4 if it needs to be run with "--force" to delete directories and registries.
+      Execute(('slider', 'destroy', SLIDER_APP_NAME, "--force"),
+              user=params.hive_user,
+              timeout=30,
+              ignore_failures=True,
+      )
+
+    """
+    Controls the start of LLAP.
+    """
+    def _llap_start(self, env, cleanup=False):
+      import params
+      env.set_params(params)
+      Logger.info("Starting LLAP")
+      LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()
+      LLAP_APP_NAME = 'llap0'
+
+      unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
+
+      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances {params.num_llap_nodes}"
+                   " --slider-am-container-mb {params.slider_am_container_mb} --size {params.llap_daemon_container_size}m "
+                   " --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m --loglevel {params.llap_log_level}"
+                   " --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")
+      if params.security_enabled:
+        llap_keytab_splits = params.hive_llap_keytab_file.split("/")
+        Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
+        cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
+                      "{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")
+
+      # Append args.
+      llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
+      cmd += format(" --args \" {llap_java_args}\"")
+
+      run_file_path = None
+      try:
+        Logger.info(format("Command: {cmd}"))
+        code, output, error = shell.checked_call(cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
+
+        if code != 0 or output is None:
+          raise Fail("Command failed with either non-zero return code or no output.")
+
+        # E.g., output:
+        # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
+        exp = r"Prepared (.*?run.sh) for running LLAP"
+        run_file_path = None
+        out_splits = output.split("\n")
+        for line in out_splits:
+          line = line.strip()
+          m = re.match(exp, line, re.I)
+          if m and len(m.groups()) == 1:
+            run_file_name = m.group(1)
+            run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
+            break
+        if not run_file_path:
+          raise Fail("Did not find run.sh file in output: " + str(output))
+
+        Logger.info(format("Run file path: {run_file_path}"))
+        Execute(run_file_path, user=params.hive_user)
+        Logger.info("Submitted LLAP app name : {0}".format(LLAP_APP_NAME))
+
+        # Check the status of the LLAP app to confirm that it launched properly and is in a
+        # running state, then go ahead with the Hive Server Interactive start.
+        status = self.check_llap_app_status(LLAP_APP_NAME, params.num_retries_for_checking_llap_status)
+        if status:
+          Logger.info("LLAP app '{0}' deployed successfully.".format(LLAP_APP_NAME))
+          return True
+        else:
+          Logger.error("LLAP app '{0}' deployment unsuccessful.".format(LLAP_APP_NAME))
+          return False
+      except:
+        # Attempt to clean up the packaged application, or potentially rename it with a .bak
+        if run_file_path is not None and cleanup:
+          try:
+            parent_dir = os.path.dirname(run_file_path)
+            if os.path.isdir(parent_dir):
+              shutil.rmtree(parent_dir)
+          except Exception, e:
+            Logger.error("Could not cleanup LLAP app package. Error: " + str(e))
+
+        # throw the original exception
+        raise
+
+    """
+    Does kinit and copies keytab for Hive/LLAP to HDFS.
+    """
+    def setup_security(self):
+      import params
+
+      self.do_kinit()
+
+      # Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
+      slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
+      Execute(slider_keytab_install_cmd, user=params.hive_user)
+
+    def do_kinit(self):
+      import params
+
+      hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
+      Execute(hive_interactive_kinit_cmd, user=params.hive_user)
+
+      llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
+      Execute(llap_kinit_cmd, user=params.hive_user)
+
+    """
+    Get llap app status data.
+    """
+    def _get_llap_app_status_info(self, app_name):
+      import status_params
+      LLAP_APP_STATUS_CMD_TIMEOUT = 0
+
+      llap_status_cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+      code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE,
+                                               logoutput=False)
+      Logger.info("Received 'llapstatus' command 'output' : {0}".format(output))
+      return self._make_valid_json(output)
+
+
+    """
+    Removes extra lines from the 'llapstatus' output (e.g. from MOTD logging) so that what remains is valid
+    JSON that can be passed to the JSON parser.
+    """
+    def _make_valid_json(self, output):
+      '''
+
+      Note: It is assumed right now that extra lines will be only at the start and not at the end.
+
+      Sample expected JSON to be passed for 'loads' is either of the form :
+
+      Case 'A':
+      {
+          "amInfo" : {
+          "appName" : "llap0",
+          "appType" : "org-apache-slider",
+          "appId" : "APP1",
+          "containerId" : "container_1466036628595_0010_01_000001",
+          "hostname" : "hostName",
+          "amWebUrl" : "http://hostName:port/"
+        },
+        "state" : "LAUNCHING",
+        ....
+        "desiredInstances" : 1,
+        "liveInstances" : 0,
+        ....
+        ....
+      }
+
+      or
+
+      Case 'B':
+      {
+        "state" : "APP_NOT_FOUND"
+      }
+
+      '''
+      splits = output.split("\n")
+
+      len_splits = len(splits)
+      if len_splits < 3:
+        raise Fail("Malformed JSON data received from 'llapstatus' command. Exiting ....")
+
+      marker_idx = None # Index of the line at which the JSON data starts
+      for idx, split in enumerate(splits):
+        curr_elem = split.strip()
+        if idx+2 > len_splits:
+          raise Fail("Iterated over the received 'llapstatus' command output. Couldn't validate the received output for JSON parsing.")
+        next_elem = (splits[(idx + 1)]).strip()
+        if curr_elem == "{":
+          if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}':
+            # For Case 'A'
+            marker_idx = idx
+            break
+          elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
+              # For Case 'B'
+              marker_idx = idx
+              break
+
+      Logger.info("Marker index for start of JSON data in 'llapstatus' command output : {0}".format(marker_idx))
+
+      # Strip any extra logging lines that precede the JSON output
+      if marker_idx is None:
+        raise Fail("Couldn't validate the received output for JSON parsing.")
+      else:
+        if marker_idx != 0:
+          del splits[0:marker_idx]
+          Logger.info("Removed lines: '1-{0}' from the received 'llapstatus' output to make it valid for JSON parsing.".format(marker_idx))
+
+      scanned_output = '\n'.join(splits)
+      llap_app_info = json.loads(scanned_output)
+      return llap_app_info
+
+
+    """
+    Checks the LLAP app status. Possible states are: 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' and 'LAUNCHING'.
+
+    If the app is in the 'APP_NOT_FOUND', 'RUNNING_PARTIAL' or 'LAUNCHING' state:
+       retry up to 'num_retries' times for the app to reach (1) 'RUNNING_ALL', or (2) 'RUNNING_PARTIAL'
+       with 80% or more of 'desiredInstances' running, and return True.
+    else:
+       return False.
+
+    Parameters: llap_app_name : deployed llap app name.
+                num_retries :   Number of retries to check the LLAP app status.
+    """
+    def check_llap_app_status(self, llap_app_name, num_retries):
+      # Record the start time so the total wait can be reported.
+      curr_time = time.time()
+
+      if num_retries <= 0:
+        num_retries = 2
+      if num_retries > 20:
+        num_retries = 20
+      @retry(times=num_retries, sleep_time=2, err_class=Fail)
+      def do_retries():
+        live_instances = 0
+        desired_instances = 0
+
+        percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state.
+        llap_app_info = self._get_llap_app_status_info(llap_app_name)
+        if llap_app_info is None or 'state' not in llap_app_info:
+          Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
+          return False
+
+        if llap_app_info['state'].upper() == 'RUNNING_ALL':
+          Logger.info(
+            "LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
+          return True
+        elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL':
+          # Check how many instances were up.
+          if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info:
+            live_instances = llap_app_info['liveInstances']
+            desired_instances = llap_app_info['desiredInstances']
+          else:
+            Logger.info(
+              "LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \
+              "Exiting ....".format(llap_app_name, llap_app_info['state']))
+            Logger.info(llap_app_info)
+            return False
+          if desired_instances == 0:
+            Logger.info("LLAP app '{0}' desired instances are set to 0. Exiting ....".format(llap_app_name))
+            return False
+
+          percentInstancesUp = 0
+          if live_instances > 0:
+            percentInstancesUp = float(live_instances) / desired_instances * 100
+          if percentInstancesUp >= percent_desired_instances_to_be_up:
+            Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'  >= {3}% of Desired Instances : " \
+                        "'{4}'.".format(llap_app_name, llap_app_info['state'],
+                                       llap_app_info['liveInstances'],
+                                       percent_desired_instances_to_be_up,
+                                       llap_app_info['desiredInstances']))
+            return True
+          else:
+            Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \
+                        "'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'],
+                                                       llap_app_info['liveInstances'],
+                                                       llap_app_info['desiredInstances'],
+                                                       time.time() - curr_time))
+            raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instances : '{1}'".format(llap_app_info['liveInstances'],
+                                                                                                           llap_app_info['desiredInstances']))
+        elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']:
+          status_str = "LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state'])
+          Logger.info(status_str)
+          raise Fail(status_str)
+        else:  # Covers any unknown state we get.
+          Logger.info(
+            "LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING_ALL' or 'RUNNING_PARTIAL'.".format(llap_app_name, llap_app_info['state']))
+          return False
+
+      try:
+        status = do_retries()
+        return status
+      except Exception, e:
+        Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name,
+                                                                                          time.time() - curr_time))
+        traceback.print_exc()
+        return False
+
+    def get_log_folder(self):
+      import params
+      return params.hive_log_dir
+
+    def get_user(self):
+      import params
+      return params.hive_user
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveServerInteractiveWindows(HiveServerInteractive):
+
+  def status(self, env):
+    pass
+
+if __name__ == "__main__":
+  HiveServerInteractive().execute()
\ No newline at end of file
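
For readers following the '_make_valid_json' logic above: the idea of discarding leading non-JSON lines
(e.g. an MOTD banner printed before the 'llapstatus' JSON) can be sketched in isolation. The snippet below
is a minimal, self-contained sketch using only the standard 'json' module; the helper name
'strip_leading_noise' and the sample banner text are illustrative, not part of the Ambari script.

    import json

    def strip_leading_noise(output):
        # Assumes, as the script above notes, that extra lines appear only
        # before the JSON object, never after it.
        lines = output.splitlines()
        for idx, line in enumerate(lines):
            if line.strip() == "{":  # first line of the JSON object
                return json.loads("\n".join(lines[idx:]))
        raise ValueError("No JSON object found in llapstatus output")

    if __name__ == "__main__":
        sample = "MOTD banner line\n{\n  \"state\" : \"APP_NOT_FOUND\"\n}"
        print(strip_leading_noise(sample))  # -> {'state': 'APP_NOT_FOUND'}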

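Similarly, the readiness decision described in the 'check_llap_app_status' docstring (RUNNING_ALL is always
ready; RUNNING_PARTIAL counts once at least 80% of 'desiredInstances' are live; anything else is not ready)
can be expressed on its own. This is a minimal sketch assuming an already-parsed llapstatus dict; the
function name and threshold parameter are illustrative only.

    def llap_app_ready(app_info, min_percent_up=80):
        # RUNNING_ALL is always ready; RUNNING_PARTIAL is ready once enough
        # of the desired daemons are live; any other state is not ready.
        state = app_info.get('state', '').upper()
        if state == 'RUNNING_ALL':
            return True
        if state == 'RUNNING_PARTIAL':
            desired = app_info.get('desiredInstances', 0)
            live = app_info.get('liveInstances', 0)
            return desired > 0 and (float(live) / desired) * 100 >= min_percent_up
        return False

    # Example: 4 of 5 daemons live -> 80% -> ready
    print(llap_app_ready({'state': 'RUNNING_PARTIAL', 'desiredInstances': 5, 'liveInstances': 4}))  # True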

[18/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
new file mode 100755
index 0000000..6bd8df9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
@@ -0,0 +1,835 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These tables are not part of package jdo, so if you regenerate this file you need to manually add this section back to it.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
new file mode 100755
index 0000000..7b886e1
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
@@ -0,0 +1,1538 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and lock tables
+-- These are not part of package jdo, so if you are going to regenerate this file, you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+
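
The init script above finishes by recording the schema version in the VERSION table. As a quick sanity check after running it (a minimal sketch against the same PostgreSQL metastore, using the quoted identifiers defined above; this query is not part of the shipped script):

    -- Verify that the init script ran to completion: exactly one row is expected.
    SELECT "VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT"
      FROM "VERSION";
    -- Expected result: 1 | 0.13.0 | Hive release version 0.13.0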

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
new file mode 100755
index 0000000..d08b985
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
@@ -0,0 +1,165 @@
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
+
+-- 15-HIVE-5700.oracle.sql
+-- Normalize the date partition column values as best we can. No schema changes.
+
+CREATE FUNCTION hive13_to_date(date_str IN VARCHAR2) RETURN DATE IS dt DATE; BEGIN dt := TO_DATE(date_str, 'YYYY-MM-DD'); RETURN dt; EXCEPTION WHEN others THEN RETURN null; END;/
+
+MERGE INTO PARTITION_KEY_VALS
+USING (
+  SELECT SRC.PART_ID as IPART_ID, SRC.INTEGER_IDX as IINTEGER_IDX, 
+     NVL(TO_CHAR(hive13_to_date(PART_KEY_VAL),'YYYY-MM-DD'), PART_KEY_VAL) as NORM
+  FROM PARTITION_KEY_VALS SRC
+    INNER JOIN PARTITIONS ON SRC.PART_ID = PARTITIONS.PART_ID
+    INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
+      AND PARTITION_KEYS.INTEGER_IDX = SRC.INTEGER_IDX AND PARTITION_KEYS.PKEY_TYPE = 'date'
+) ON (IPART_ID = PARTITION_KEY_VALS.PART_ID AND IINTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX)
+WHEN MATCHED THEN UPDATE SET PART_KEY_VAL = NORM;
+
+DROP FUNCTION hive13_to_date;
+
+-- 16-HIVE-6386.oracle.sql
+ALTER TABLE DBS ADD OWNER_NAME VARCHAR2(128);
+ALTER TABLE DBS ADD OWNER_TYPE VARCHAR2(10);
+
+-- 17-HIVE-6458.oracle.sql
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+-- 18-HIVE-6757.oracle.sql
+UPDATE SDS
+  SET INPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+WHERE
+  INPUT_FORMAT= 'parquet.hive.DeprecatedParquetInputFormat' or
+  INPUT_FORMAT = 'parquet.hive.MapredParquetInputFormat'
+;
+
+UPDATE SDS
+  SET OUTPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+WHERE
+  OUTPUT_FORMAT = 'parquet.hive.DeprecatedParquetOutputFormat'  or
+  OUTPUT_FORMAT = 'parquet.hive.MapredParquetOutputFormat'
+;
+
+UPDATE SERDES
+  SET SLIB='org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+WHERE
+  SLIB = 'parquet.hive.serde.ParquetHiveSerDe'
+;
+
+-- hive-txn-schema-0.13.0.oracle.sql
+
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the License); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an AS IS BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
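
The HIVE-5700 step at the top of this script normalizes date-typed partition key values in place: the temporary hive13_to_date helper returns NULL for values that do not parse as dates, so the NVL in the MERGE leaves those untouched and only well-formed dates are rewritten into canonical YYYY-MM-DD form. As a rough illustration of the rewrite (a hypothetical ad-hoc query with a made-up sample value, not part of the upgrade script):

    -- Mirrors the NVL(TO_CHAR(hive13_to_date(...), 'YYYY-MM-DD'), PART_KEY_VAL) expression
    -- for a value that parses as a date; '2014-1-5' is normalized to '2014-01-05'.
    SELECT NVL(TO_CHAR(TO_DATE('2014-1-5', 'YYYY-MM-DD'), 'YYYY-MM-DD'), '2014-1-5') AS NORMALIZED
      FROM dual;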

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
new file mode 100755
index 0000000..b34f406
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
@@ -0,0 +1,38 @@
+ALTER TABLE TXNS MODIFY (
+  TXN_ID NUMBER(19),
+  TXN_STARTED NUMBER(19),
+  TXN_LAST_HEARTBEAT NUMBER(19)
+);
+
+ALTER TABLE TXN_COMPONENTS MODIFY (
+  TC_TXNID NUMBER(19)
+);
+
+ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (
+  CTC_TXNID NUMBER(19)
+);
+
+ALTER TABLE NEXT_TXN_ID MODIFY (
+  NTXN_NEXT NUMBER(19)
+);
+
+ALTER TABLE HIVE_LOCKS MODIFY (
+  HL_LOCK_EXT_ID NUMBER(19),
+  HL_LOCK_INT_ID NUMBER(19),
+  HL_TXNID NUMBER(19),
+  HL_LAST_HEARTBEAT NUMBER(19),
+  HL_ACQUIRED_AT NUMBER(19)
+);
+
+ALTER TABLE NEXT_LOCK_ID MODIFY (
+  NL_NEXT NUMBER(19)
+);
+
+ALTER TABLE COMPACTION_QUEUE MODIFY (
+  CQ_ID NUMBER(19),
+  CQ_START NUMBER(19)
+);
+
+ALTER TABLE NEXT_COMPACTION_QUEUE_ID MODIFY (
+  NCQ_NEXT NUMBER(19)
+);

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
new file mode 100755
index 0000000..34bda73
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
@@ -0,0 +1,149 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "webhcat-site": {
+            "templeton.kerberos.secret": "secret",
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host}"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}


[41/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
deleted file mode 100755
index 478c240..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_upgrade.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core import shell
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import format_stack_version
-
-
-def post_upgrade_deregister():
-  """
-  Runs the "hive --service hiveserver2 --deregister <version>" command to
-  de-provision the server in preparation for an upgrade. This will contact
-  ZooKeeper to remove the server so that clients that attempt to connect
-  will be directed to other servers automatically. Once all
-  clients have drained, the server will shut down automatically; this process
-  could take a very long time.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  """
-  import params
-
-  Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
-
-  if params.security_enabled:
-    kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
-    Execute(kinit_command,user=params.smokeuser)
-
-  # calculate the current hive server version
-  current_hiveserver_version = _get_current_hiveserver_version()
-  if current_hiveserver_version is None:
-    raise Fail('Unable to determine the current HiveServer2 version to deregister.')
-
-  # fallback when upgrading because <stack-root>/current/hive-server2/conf/conf.server may not exist
-  hive_server_conf_dir = params.hive_server_conf_dir
-  if not os.path.exists(hive_server_conf_dir):
-    hive_server_conf_dir = "/etc/hive/conf.server"
-
-  # deregister
-  hive_execute_path = params.execute_path
-  # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
-  # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
-  # By now <stack-selector-tool> has been called to set 'current' to target-stack
-  if "downgrade" == params.upgrade_direction:
-    # hive_bin
-    downgrade_version = params.current_version
-    if params.downgrade_from_version:
-      downgrade_version = params.downgrade_from_version
-    hive_execute_path = _get_hive_execute_path(downgrade_version)
-
-  command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
-  Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
-
-
-def _get_hive_execute_path(stack_version_formatted):
-  """
-  Returns the exact execute path to use for the given stack-version.
-  This method does not return the "current" path
-  :param stack_version_formatted: Exact stack-version to use in the new path
-  :return: Hive execute path for the exact stack-version
-  """
-  import params
-
-  hive_execute_path = params.execute_path
-  formatted_stack_version = format_stack_version(stack_version_formatted)
-  if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
-    # hive_bin
-    new_hive_bin = format('{stack_root}/{stack_version_formatted}/hive/bin')
-    if (os.pathsep + params.hive_bin) in hive_execute_path:
-      hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
-    # hadoop_bin_dir
-    new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version_formatted)
-    old_hadoop_bin = params.hadoop_bin_dir
-    if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
-      hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
-  return hive_execute_path
-
-
-def _get_current_hiveserver_version():
-  """
-  Runs "hive --version" and parses the result in order
-  to obtain the current version of hive.
-
-  :return:  the hiveserver2 version, returned by "hive --version"
-  """
-  import params
-
-  try:
-    # When downgrading, the source version should be the version we are downgrading from
-    if "downgrade" == params.upgrade_direction:
-      if not params.downgrade_from_version:
-        raise Fail('The version from which we are downgrading should be provided in \'downgrade_from_version\'')
-      source_version = params.downgrade_from_version
-    else:
-      source_version = params.current_version
-    hive_execute_path = _get_hive_execute_path(source_version)
-    version_hive_bin = params.hive_bin
-    formatted_source_version = format_stack_version(source_version)
-    if formatted_source_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_source_version):
-      version_hive_bin = format('{stack_root}/{source_version}/hive/bin')
-    command = format('{version_hive_bin}/hive --version')
-    return_code, output = shell.call(command, user=params.hive_user, path=hive_execute_path)
-  except Exception, e:
-    Logger.error(str(e))
-    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
-
-  if return_code != 0:
-    raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
-
-  match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', output, re.MULTILINE)
-
-  if match:
-    current_hive_server_version = match.group(2)
-    return current_hive_server_version
-  else:
-    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(output))
-
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
deleted file mode 100755
index 22b4061..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import time
-
-from ambari_commons.constants import UPGRADE_TYPE_ROLLING
-from resource_management.core.logger import Logger
-from resource_management.core import shell
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import File, Execute
-from resource_management.core.resources.service import Service
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.hive_check import check_thrift_port_sasl
-from resource_management.libraries.functions import get_user_call_output
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hive_service(name, action='start', upgrade_type=None):
-  import params
-  if name == 'metastore':
-    if action == 'start' or action == 'stop':
-      Service(params.hive_metastore_win_service_name, action=action)
-
-  if name == 'hiveserver2':
-    if action == 'start' or action == 'stop':
-      Service(params.hive_server_win_service_name, action=action)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hive_service(name, action='start', upgrade_type=None):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format("{start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format("{start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-
-
-    if params.security_enabled and params.current_version and check_stack_feature(StackFeature.HIVE_SERVER2_KERBERIZED_ENV, params.current_version):
-      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; ")
-      Execute(hive_kinit_cmd, user=params.hive_user)
-
-  pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=params.hive_user, is_checked_call=False)[1]
-  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
-
-  if action == 'start':
-    if name == 'hiveserver2':
-      check_fs_root(params.hive_server_conf_dir, params.execute_path)
-
-    daemon_cmd = cmd
-    hadoop_home = params.hadoop_home
-    hive_bin = "hive"
-
-    # upgrading hiveserver2 (rolling_restart) means that there is an existing,
-    # de-registering hiveserver2; the pid will still exist, but the new
-    # hiveserver is spinning up on a new port, so the pid will be re-written
-    if upgrade_type == UPGRADE_TYPE_ROLLING:
-      process_id_exists_command = None
-
-      if params.version and params.stack_root:
-        hadoop_home = format("{stack_root}/{version}/hadoop")
-        hive_bin = os.path.join(params.hive_bin, hive_bin)
-      
-    Execute(daemon_cmd, 
-      user = params.hive_user,
-      environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_bin },
-      path = params.execute_path,
-      not_if = process_id_exists_command)
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
-       params.hive_jdbc_driver == "org.postgresql.Driver" or \
-       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-
-      validation_called = False
-
-      if params.hive_jdbc_target is not None:
-        validation_called = True
-        validate_connection(params.hive_jdbc_target, params.hive_lib)
-      if params.hive2_jdbc_target is not None:
-        validation_called = True
-        validate_connection(params.hive2_jdbc_target, params.hive_server2_hive2_lib)
-
-      if not validation_called:
-        emessage = "ERROR! The DB connection check should be executed at least once!"
-        Logger.error(emessage)
-
-  elif action == 'stop':
-
-    daemon_kill_cmd = format("{sudo} kill {pid}")
-    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid}")
-
-    Execute(daemon_kill_cmd,
-      not_if = format("! ({process_id_exists_command})")
-    )
-
-    wait_time = 5
-    Execute(daemon_hard_kill_cmd,
-      not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
-      ignore_failures = True
-    )
-
-    try:
-      # check that the process has stopped, else fail the task
-      Execute(format("! ({process_id_exists_command})"),
-        tries=20,
-        try_sleep=3,
-      )
-    except:
-      show_logs(params.hive_log_dir, params.hive_user)
-      raise
-
-    File(pid_file,
-         action = "delete"
-    )
-
-def validate_connection(target_path_to_jdbc, hive_lib_path):
-  import params
-
-  path_to_jdbc = target_path_to_jdbc
-  if not params.jdbc_jar_name:
-    path_to_jdbc = format("{hive_lib_path}/") + \
-                   params.default_connectors_map[params.hive_jdbc_driver] if params.hive_jdbc_driver in params.default_connectors_map else None
-    if not os.path.isfile(path_to_jdbc):
-      path_to_jdbc = format("{hive_lib_path}/") + "*"
-      error_message = "Error! Sorry, but we can't find the jdbc driver with the default name " + params.default_connectors_map[params.hive_jdbc_driver] + \
-                      " in the hive lib dir, so the db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the server host."
-      Logger.error(error_message)
-
-  db_connection_check_command = format(
-    "{java64_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
-
-  try:
-    Execute(db_connection_check_command,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
-  except:
-    show_logs(params.hive_log_dir, params.hive_user)
-    raise
-
-
-def check_fs_root(conf_dir, execution_path):
-  import params
-
-  if not params.fs_root.startswith("hdfs://"):
-    Logger.info("Skipping fs root check as fs_root does not start with hdfs://")
-    return
-
-  metatool_cmd = format("hive --config {conf_dir} --service metatool")
-  cmd = as_user(format("{metatool_cmd} -listFSRoot", env={'PATH': execution_path}), params.hive_user) \
-        + format(" 2>/dev/null | grep hdfs:// | cut -f1,2,3 -d '/' | grep -v '{fs_root}' | head -1")
-  code, out = shell.call(cmd)
-
-  if code == 0 and out.strip() != "" and params.fs_root.strip() != out.strip():
-    out = out.strip()
-    cmd = format("{metatool_cmd} -updateLocation {fs_root} {out}")
-    Execute(cmd,
-            user=params.hive_user,
-            environment={'PATH': execution_path}
-    )
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
deleted file mode 100755
index eaf95ad..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_service_interactive.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-# Python Imports
-
-# Ambari Commons & Resource Management imports
-import os
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import File, Execute
-from resource_management.libraries.functions import get_user_call_output
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-# Local Imports
-from hive_service import check_fs_root
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hive_service_interactive(name, action='start', upgrade_type=None):
-  pass
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hive_service_interactive(name, action='start', upgrade_type=None):
-  import params
-
-  pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
-  cmd = format("{start_hiveserver2_interactive_path} {hive_pid_dir}/hive-server2-interactive.out {hive_log_dir}/hive-server2-interactive.err {pid_file} {hive_server_interactive_conf_dir} {hive_log_dir}")
-
-  pid = get_user_call_output.get_user_call_output(format("cat {pid_file}"), user=params.hive_user, is_checked_call=False)[1]
-  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
-
-  if action == 'start':
-    check_fs_root(params.hive_server_interactive_conf_dir, params.execute_path_hive_interactive)
-    daemon_cmd = cmd
-    hadoop_home = params.hadoop_home
-    hive_interactive_bin = "hive2"
-
-    Execute(daemon_cmd,
-            user = params.hive_user,
-            environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_interactive_bin },
-            path = params.execute_path,
-            not_if = process_id_exists_command)
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
-        params.hive_jdbc_driver == "org.postgresql.Driver" or \
-        params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-
-      path_to_jdbc = params.target_hive_interactive
-      if not params.jdbc_jar_name:
-        path_to_jdbc = format("{hive_interactive_lib}/") + \
-                       params.default_connectors_map[params.hive_jdbc_driver] if params.hive_jdbc_driver in params.default_connectors_map else None
-        if path_to_jdbc is None or not os.path.isfile(path_to_jdbc):
-          path_to_jdbc = format("{hive_interactive_lib}/") + "*"
-          error_message = "Error! Cannot find a JDBC driver with the default name " + params.default_connectors_map.get(params.hive_jdbc_driver, "<unknown>") + \
-                " in the hive lib dir, so the db connection check may fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc}' on the Ambari server host."
-          Logger.error(error_message)
-
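-      # DBConnectionVerification.jar ships with the ambari-agent; the !p conversion marks the value as a password so format() masks it in logged command lines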
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
-  elif action == 'stop':
-
-    daemon_kill_cmd = format("{sudo} kill {pid}")
-    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid}")
-
-    Execute(daemon_kill_cmd,
-            not_if = format("! ({process_id_exists_command})")
-            )
-
-    # check whether the process stopped; otherwise send the hard kill command.
-    try:
-      Execute(format("! ({process_id_exists_command})"),
-              tries=10,
-              try_sleep=3,
-              )
-    except:
-      Execute(daemon_hard_kill_cmd,
-              not_if = format("! ({process_id_exists_command}) ")
-              )
-
-    # check that the process stopped, otherwise fail the task
-    Execute(format("! ({process_id_exists_command})"),
-            tries=20,
-            try_sleep=3,
-            )
-
-    File(pid_file,
-         action = "delete"
-         )

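The stop branch of the removed hive_service_interactive() escalates from a plain kill to kill -9, re-checks that the process is gone, and only then deletes the pid file. A minimal standalone sketch of that escalation (assumes the caller may signal the process directly; a real agent goes through sudo since HiveServer2 runs as the hive user):

import os
import signal
import time

def stop_with_escalation(pid_file, wait_secs=30, poll_secs=3):
  with open(pid_file) as f:
    pid = int(f.read().strip())

  def alive():
    try:
      os.kill(pid, 0)            # signal 0 only tests for existence
      return True
    except OSError:
      return False

  os.kill(pid, signal.SIGTERM)   # polite shutdown first
  deadline = time.time() + wait_secs
  while alive() and time.time() < deadline:
    time.sleep(poll_secs)
  if alive():
    os.kill(pid, signal.SIGKILL) # escalate, mirroring the kill -9 path
  if not alive():
    os.remove(pid_file)
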
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100755
index 851dc02..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-import mysql_users
-from resource_management import *
-
-from mysql_service import mysql_service
-from mysql_utils import mysql_configure
-
-
-class MysqlServer(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def clean(self, env):
-    import params
-    env.set_params(params)
-    mysql_users.mysql_deluser()
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mysql_configure()
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    mysql_service(daemon_name=params.daemon_name, action='start')
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    mysql_service(daemon_name=params.daemon_name, action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    mysql_service(daemon_name=status_params.daemon_name, action='status')
-
-
-if __name__ == "__main__":
-  MysqlServer().execute()

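MysqlServer follows the usual Ambari Script contract: the agent instantiates the class and dispatches the command name (INSTALL, CONFIGURE, START, STOP, STATUS) to the method with the same lowercase name, and env.set_params(params) pulls the params module into scope for format() and templates. A minimal skeleton of that contract, assuming the standard package layout with a params.py next to it (the service and daemon names are placeholders):

from resource_management.libraries.script.script import Script
from resource_management.core.resources.system import Execute

class ExampleService(Script):
  def install(self, env):
    self.install_packages(env)   # installs the packages declared in metainfo.xml
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)       # make params visible to format()/templates

  def start(self, env, rolling_restart=False):
    import params
    env.set_params(params)
    Execute(('service', 'example-daemon', 'start'), sudo=True)

  def stop(self, env, rolling_restart=False):
    import params
    env.set_params(params)
    Execute(('service', 'example-daemon', 'stop'), sudo=True)

  def status(self, env):
    # a real status() raises ComponentIsNotRunning when the daemon is down
    pass

if __name__ == "__main__":
  ExampleService().execute()
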
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100755
index 8b98ed1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'): 
-  status_cmd = format("pgrep -l '^{process_name}$'")
-  cmd = ('service', daemon_name, action)
-
-  if action == 'status':
-    try:
-      Execute(status_cmd)
-    except Fail:
-      raise ComponentIsNotRunning()
-  elif action == 'stop':
-    import params
-    Execute(cmd,
-            logoutput = True,
-            only_if = status_cmd,
-            sudo = True,
-    )
-  elif action == 'start':
-    import params   
-    Execute(cmd,
-      logoutput = True,
-      not_if = status_cmd,
-      sudo = True,
-    )
-
-
-

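The status path above treats the exit code of pgrep as the source of truth: zero means a process whose name exactly matches is running, non-zero means ComponentIsNotRunning. A standalone sketch of the same idea with subprocess (the mysqld defaults are illustrative; the real values come from status_params):

import subprocess

def mysql_is_running(process_name="mysqld"):
  # pgrep exits 0 only if at least one process name matches the anchored pattern
  return subprocess.call(["pgrep", "-l", "^%s$" % process_name]) == 0

def mysql_service_standalone(daemon_name="mysqld", action="start"):
  if action == "status":
    if not mysql_is_running():
      raise RuntimeError("%s is not running" % daemon_name)  # stands in for ComponentIsNotRunning
  elif action in ("start", "stop"):
    # act only when needed, mirroring the not_if/only_if guards above
    if (action == "start") != mysql_is_running():
      subprocess.check_call(["sudo", "service", daemon_name, action])
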
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
deleted file mode 100755
index c023548..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_users.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-# Used to add hive access to the needed components
-def mysql_adduser():
-  import params
-  
-  File(params.mysql_adduser_path,
-       mode=0755,
-       content=StaticFile('addMysqlUser.sh')
-  )
-  hive_server_host = format("{hive_server_host}")
-  hive_metastore_host = format("{hive_metastore_host}")
-
-  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
-  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
-  if (hive_server_host == hive_metastore_host):
-    cmd = format(add_hiveserver_cmd)
-  else:
-    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          logoutput=False,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-
-# Removes hive access from components
-def mysql_deluser():
-  import params
-  
-  File(params.mysql_deluser_path,
-       mode=0755,
-       content=StaticFile('removeMysqlUser.sh')
-  )
-  hive_server_host = format("{hive_server_host}")
-  hive_metastore_host = format("{hive_metastore_host}")
-
-  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
-  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
-  if (hive_server_host == hive_metastore_host):
-    cmd = format(del_hiveserver_cmd)
-  else:
-    cmd = format(
-      del_hiveserver_cmd + ";" + del_metastore_cmd)
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-  )
-

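Both helpers above build one addMysqlUser.sh (or removeMysqlUser.sh, minus the password) invocation per host and collapse them into a single command when HiveServer2 and the metastore are co-located. A small sketch of that command assembly; all argument values below are illustrative:

def build_user_commands(script_path, daemon_name, db_user, db_passwd, hosts):
  seen = []
  cmds = []
  for host in hosts:
    if host in seen:     # hive server and metastore may live on the same host
      continue
    seen.append(host)
    cmds.append("bash -x %s %s %s %s %s" % (script_path, daemon_name, db_user, db_passwd, host))
  return " ; ".join(cmds)

# e.g. build_user_commands("/var/lib/ambari-agent/tmp/addMysqlUser.sh", "mysqld",
#                          "hive", "hivepassword", ["hive-server-host", "metastore-host"])
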
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
deleted file mode 100755
index 5006b56..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/mysql_utils.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import mysql_users
-
-def mysql_configure():
-  import params
-
-  # required for running hive
-  replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
-  Execute(replace_bind_address,
-          sudo = True,
-  )
-  
-  # this also will start mysql-server
-  mysql_users.mysql_adduser()
-  
\ No newline at end of file

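mysql_configure() only does two things: force MySQL to listen on all interfaces so remote Hive components can reach it, then create the Hive users (which, per the comment above, also starts mysql-server via the helper script). A plain-Python sketch of the bind-address rewrite that the sed call performs:

import re

def set_bind_address(cnf_path, address="0.0.0.0"):
  with open(cnf_path) as f:
    lines = f.readlines()
  with open(cnf_path, "w") as f:
    for line in lines:
      if re.match(r"^bind-address[ \t]*=", line):
        line = "bind-address = %s\n" % address   # accept connections from any interface
      f.write(line)

# set_bind_address("/etc/my.cnf")   # or /etc/mysql/my.cnf on Ubuntu, as in params_linux.py
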
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
deleted file mode 100755
index f10a3f3..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.default import default
-
-if OSCheck.is_windows_family():
-  from params_windows import *
-else:
-  from params_linux import *
-
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-retryAble = default("/commandParams/command_retry_enabled", False)

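params.py only selects the OS-specific params module and then layers two command-level flags on top with default(). default() is a path-style lookup into the command JSON that returns the fallback when a key is absent; a rough standalone equivalent for illustration:

def default_from(config, path, fallback):
  node = config
  for key in path.strip("/").split("/"):
    if not isinstance(node, dict) or key not in node:
      return fallback
    node = node[key]
  return node

# default_from({"hostLevelParams": {"host_sys_prepped": True}},
#              "/hostLevelParams/host_sys_prepped", False)   # -> True
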
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
deleted file mode 100755
index 9d79e12..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
+++ /dev/null
@@ -1,735 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import status_params
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-import os
-
-from urlparse import urlparse
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from ambari_commons.os_check import OSCheck
-
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.copy_tarball import STACK_ROOT_PATTERN, STACK_NAME_PATTERN, STACK_VERSION_PATTERN
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions.get_port_from_url import get_port_from_url
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries import functions
-from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
-
-# Default log4j version; put config files under /etc/hive/conf
-log4j_version = '1'
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_root = status_params.stack_root
-stack_name = status_params.stack_name
-stack_name_uppercase = stack_name.upper()
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-
-# Needed since this is an Atlas Hook service.
-cluster_name = config['clusterName']
-
-# node hostname
-hostname = config["hostname"]
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = status_params.stack_version_unformatted
-stack_version_formatted_major = status_params.stack_version_formatted_major
-
-# this is not available on INSTALL action because <stack-selector-tool> is not available
-stack_version_formatted = functions.get_stack_version('hive-server2')
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
-# It cannot be used during the initial Cluster Install because the version is not yet known.
-version = default("/commandParams/version", None)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-
-# When downgrading, 'version' and 'current_version' both point to the downgrade-target version;
-# downgrade_from_version provides the source version the downgrade is happening from.
-downgrade_from_version = default("/commandParams/downgrade_from_version", None)
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-# Upgrade direction
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
-
-# component ROLE directory (like hive-metastore or hive-server2-hive2)
-component_directory = status_params.component_directory
-component_directory_interactive = status_params.component_directory_interactive
-
-hadoop_home = '/usr/lib/hadoop'
-hive_bin = '/usr/lib/hive/bin'
-hive_schematool_ver_bin = '/usr/lib/hive/bin'
-hive_schematool_bin = '/usr/lib/hive/bin'
-hive_lib = '/usr/lib/hive/lib'
-hive_version_lib = '/usr/lib/hive/lib'
-#hadoop_home = format('{stack_root}/current/hadoop-client')
-#hive_bin = format('{stack_root}/current/{component_directory}/bin')
-#hive_schematool_ver_bin = format('{stack_root}/{version}/hive/bin')
-#hive_schematool_bin = format('{stack_root}/current/{component_directory}/bin')
-#hive_lib = format('{stack_root}/current/{component_directory}/lib')
-#hive_version_lib = format('{stack_root}/{version}/hive/lib')
-hive_var_lib = '/var/lib/hive'
-hive_user_home_dir = "/home/hive"
-
-# starting on stacks where HSI is supported, we need to begin using the 'hive2' schematool
-hive_server2_hive2_dir = None
-hive_server2_hive2_lib = None
-
-version = default("/commandParams/version", None)
-
-if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, version_for_stack_feature_checks):
-  # the name of the hiveserver2-hive2 component
-  hive_server2_hive2_component = status_params.SERVER_ROLE_DIRECTORY_MAP["HIVE_SERVER_INTERACTIVE"]
-
-  # when using the version, we can just specify the component as "hive2"
-  hive_schematool_ver_bin = format('{stack_root}/{version}/hive2/bin')
-
-  # use the schematool which ships with hive2
-  hive_schematool_bin = format('{stack_root}/current/{hive_server2_hive2_component}/bin')
-
-  # <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
-  hive_server2_hive2_dir = format('{stack_root}/current/{hive_server2_hive2_component}')
-
-  # <stack-root>/<version>/hive2 (as opposed to <stack-root>/<version>/hive)
-  hive_server2_hive2_version_dir = format('{stack_root}/{version}/hive2')
-
-  # <stack-root>/current/hive-server2-hive2/lib -> <stack-root>/<version>/hive2/lib
-  hive_server2_hive2_lib = format('{hive_server2_hive2_dir}/lib')
-
-  # <stack-root>/<version>/hive2/lib
-  hive_server2_hive2_version_lib = format('{hive_server2_hive2_version_dir}/lib')
-
-
-hive_interactive_bin = format('{stack_root}/current/{component_directory_interactive}/bin')
-hive_interactive_lib = format('{stack_root}/current/{component_directory_interactive}/lib')
-
-# Hive Interactive related paths
-hive_interactive_var_lib = '/var/lib/hive2'
-
-# These tar folders were used in previous stack versions, e.g., HDP 2.1
-hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
-pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
-hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')
-sqoop_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/sqoop*.tar.gz')
-
-hive_metastore_site_supported = False
-hive_etc_dir_prefix = "/etc/hive"
-hive_interactive_etc_dir_prefix = "/etc/hive2"
-limits_conf_dir = "/etc/security/limits.d"
-
-hive_user_nofile_limit = default("/configurations/hive-env/hive_user_nofile_limit", "32000")
-hive_user_nproc_limit = default("/configurations/hive-env/hive_user_nproc_limit", "16000")
-
-# use the directories from status_params as they are already calculated for
-# the correct stack version
-hadoop_conf_dir = status_params.hadoop_conf_dir
-hadoop_bin_dir = status_params.hadoop_bin_dir
-webhcat_conf_dir = status_params.webhcat_conf_dir
-hive_conf_dir = status_params.hive_conf_dir
-hive_home_dir = status_params.hive_home_dir
-hive_config_dir = status_params.hive_config_dir
-hive_client_conf_dir = status_params.hive_client_conf_dir
-hive_server_conf_dir = status_params.hive_server_conf_dir
-
-hcat_conf_dir = '/etc/hive-hcatalog/conf'
-config_dir = '/etc/hive-webhcat/conf'
-hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-
-# --- Tarballs ---
-# DON'T CHANGE THESE VARIABLE NAMES
-# Values don't change from those in copy_tarball.py
-webhcat_apps_dir = "/apps/webhcat"
-hive_tar_source = "{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
-pig_tar_source = "{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
-hive_tar_dest_file = "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
-pig_tar_dest_file = "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
-
-hadoop_streaming_tar_source = "{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
-sqoop_tar_source = "{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
-hadoop_streaming_tar_dest_dir = "/{0}/apps/{1}/mapreduce/".format(STACK_NAME_PATTERN,STACK_VERSION_PATTERN)
-sqoop_tar_dest_dir = "/{0}/apps/{1}/sqoop/".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)
-
-tarballs_mode = 0444
-
-purge_tables = "false"
-# Starting with stack versions that support the hive_purge_table feature, DROP TABLE is executed with PURGE
-purge_tables = 'true'
-
-# this is NOT a typo.  Configs for hcatalog/webhcat point to a
-# specific directory which is NOT called 'conf'
-# FIXME: ODPi
-# hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
-# config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
-hcat_conf_dir = format('/etc/hive-hcatalog/conf')
-config_dir = format('/etc/hive-webhcat/conf')
-
-hive_metastore_site_supported = True
-
-execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
-
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-hive_metastore_user_passwd = unicode(hive_metastore_user_passwd) if not is_empty(hive_metastore_user_passwd) else hive_metastore_user_passwd
-hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
-
-#HACK Temporarily use dbType=azuredb while invoking schematool
-if hive_metastore_db_type == "mssql":
-  hive_metastore_db_type = "azuredb"
-
-#users
-hive_user = config['configurations']['hive-env']['hive_user']
-
-#JDBC driver jar name
-hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
-jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-hive_database_name = config['configurations']['hive-env']['hive_database_name']
-hive_database = config['configurations']['hive-env']['hive_database']
-hive_use_existing_db = hive_database.startswith('Existing')
-
-default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
-                           "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
-                           "org.postgresql.Driver":"postgresql-jdbc.jar",
-                           "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
-                           "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
-
-# NOTE: keying on the JDBC driver class name is fragile, because class paths can change;
-# keying on the database type would be more robust.
-sqla_db_used = False
-hive_previous_jdbc_jar_name = None
-if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
-  jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-elif hive_jdbc_driver == "org.postgresql.Driver":
-  jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-elif hive_jdbc_driver == "sap.jdbc4.sqlanywhere.IDriver":
-  jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-  hive_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-  sqla_db_used = True
-
-default_mysql_jar_name = "mysql-connector-java.jar"
-default_mysql_target = format("{hive_lib}/{default_mysql_jar_name}")
-hive_previous_jdbc_jar = format("{hive_lib}/{hive_previous_jdbc_jar_name}")
-if not hive_use_existing_db:
-  jdbc_jar_name = default_mysql_jar_name
-
-
-downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
-
-hive_jdbc_target = format("{hive_lib}/{jdbc_jar_name}")
-hive2_jdbc_target = None
-if hive_server2_hive2_dir:
-  hive2_jdbc_target = format("{hive_server2_hive2_lib}/{jdbc_jar_name}")
-
-# during upgrade / downgrade, use the specific version to copy the JDBC JAR to
-if upgrade_direction:
-  hive_jdbc_target = format("{hive_version_lib}/{jdbc_jar_name}")
-  hive2_jdbc_target = format("{hive_server2_hive2_version_lib}/{jdbc_jar_name}") if hive2_jdbc_target is not None else None
-
-
-hive2_previous_jdbc_jar = format("{hive_server2_hive2_lib}/{hive_previous_jdbc_jar_name}") if hive_server2_hive2_lib is not None else None
-driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
-
-# normally, the JDBC driver would be referenced by <stack-root>/current/.../foo.jar
-# but during a rolling upgrade, if <stack-selector-tool> has run and the restart then fails, the 'current'
-# pointer already points at the upgraded version location, which breaks the cp command
-source_jdbc_file = format("{stack_root}/{current_version}/hive/lib/{jdbc_jar_name}")
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver",
-                          "org.postgresql.Driver","oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
-
-prepackaged_jdbc_name = "ojdbc6.jar"
-prepackaged_ojdbc_symlink = format("{hive_lib}/{prepackaged_jdbc_name}")
-templeton_port = config['configurations']['webhcat-site']['templeton.port']
-
-#constants for type2 jdbc
-jdbc_libs_dir = format("{hive_lib}/native/lib64")
-lib_dir_available = os.path.exists(jdbc_libs_dir)
-
-if sqla_db_used:
-  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
-  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
-  libs_in_hive_lib = format("{jdbc_libs_dir}/*")
-
-
-# Start, Common Hosts and Ports
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-hive_metastore_hosts = default('/clusterHostInfo/hive_metastore_host', [])
-hive_metastore_host = hive_metastore_hosts[0] if len(hive_metastore_hosts) > 0 else None
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris'])
-
-hive_server_hosts = default("/clusterHostInfo/hive_server_host", [])
-hive_server_host = hive_server_hosts[0] if len(hive_server_hosts) > 0 else None
-
-hive_server_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
-hive_server_interactive_host = hive_server_interactive_hosts[0] if len(hive_server_interactive_hosts) > 0 else None
-# End, Common Hosts and Ports
-
-hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
-
-if hive_transport_mode.lower() == "http":
-  hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
-else:
-  hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-
-hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
-hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
-hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
-hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
-
-# ssl options
-hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
-hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
-hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
-smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-hive_metastore_principal = config['configurations']['hive-site']['hive.metastore.kerberos.principal']
-
-hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
-
-#hive_env
-hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-hive_interactive_pid = status_params.hive_interactive_pid
-
-#Default conf dir for client
-hive_conf_dirs_list = [hive_client_conf_dir]
-
-# These are the folders to which the configs will be written to.
-ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER']
-if status_params.role == "HIVE_METASTORE" and hive_metastore_hosts is not None and hostname in hive_metastore_hosts:
-  hive_conf_dirs_list.append(hive_server_conf_dir)
-elif status_params.role == "HIVE_SERVER" and hive_server_hosts is not None and hostname in hive_server_host:
-  hive_conf_dirs_list.append(hive_server_conf_dir)
-elif status_params.role == "HIVE_SERVER_INTERACTIVE" and hive_server_interactive_hosts is not None and hostname in hive_server_interactive_hosts:
-  hive_conf_dirs_list.append(status_params.hive_server_interactive_conf_dir)
-  ranger_hive_component = status_params.SERVER_ROLE_DIRECTORY_MAP['HIVE_SERVER_INTERACTIVE']
-# log4j version is 2 for hive2; put config files under /etc/hive2/conf
-if status_params.role == "HIVE_SERVER_INTERACTIVE":
-  log4j_version = '2'
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh.j2'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-
-# Hive Server Interactive
-slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
-
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-user_group = config['configurations']['cluster-env']['user_group']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-# Need this for yarn.nodemanager.recovery.dir in yarn-site
-yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
-
-target_hive_interactive = format("{hive_interactive_lib}/{jdbc_jar_name}")
-hive_intaractive_previous_jdbc_jar = format("{hive_interactive_lib}/{hive_previous_jdbc_jar_name}")
-jars_in_hive_lib = format("{hive_lib}/*.jar")
-
-start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
-start_metastore_path = format("{tmp_dir}/start_metastore_script")
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-
-if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
-else:
-  hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
-
-hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
-
-java64_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-
-##### MYSQL
-db_name = config['configurations']['hive-env']['hive_database_name']
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
-mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
-
-#### Metastore
-# initialize the schema only if not in an upgrade/downgrade
-init_metastore_schema = upgrade_direction is None
-
-########## HCAT
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-hcat_env_sh_template = config['configurations']['hcat-env']['content']
-
-#hive-log4j.properties.template
-if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
-  log4j_props = config['configurations']['hive-log4j']['content']
-else:
-  log4j_props = None
-
-#webhcat-log4j.properties.template
-if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
-  log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
-else:
-  log4j_webhcat_props = None
-
-#hive-exec-log4j.properties.template
-if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
-  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
-else:
-  log4j_exec_props = None
-
-daemon_name = status_params.daemon_name
-process_name = status_params.process_name
-hive_env_sh_template = config['configurations']['hive-env']['content']
-
-hive_hdfs_user_dir = format("/user/{hive_user}")
-hive_hdfs_user_mode = 0755
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
-whs_dir_protocol = urlparse(hive_apps_whs_dir).scheme
-hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-if OSCheck.is_ubuntu_family():
-  mysql_configname = '/etc/mysql/my.cnf'
-else:
-  mysql_configname = '/etc/my.cnf'
-
-mysql_user = 'mysql'
-
-# Hive security
-hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
-
-mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-
-hive_site_config = dict(config['configurations']['hive-site'])
-
-########################################################
-############# AMS related params #####################
-########################################################
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-has_metric_collector = len(ams_collector_hosts) > 0
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-  else:
-    metric_collector_host = ams_collector_hosts[0]
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-
-########################################################
-############# Atlas related params #####################
-########################################################
-#region Atlas Hooks
-hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
-
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
-#endregion
-
-########################################################
-########### WebHCat related params #####################
-########################################################
-
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.hcat_pid_dir
-
-webhcat_pid_file = status_params.webhcat_pid_file
-
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-#for create_hdfs_directory
-security_param = "true" if security_enabled else "false"
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
- HdfsResource,
-  user = hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
- )
-
-# Hive Interactive related
-hive_interactive_hosts = default('/clusterHostInfo/hive_server_interactive_hosts', [])
-has_hive_interactive = len(hive_interactive_hosts) > 0
-if has_hive_interactive:
-  llap_daemon_log4j = config['configurations']['llap-daemon-log4j']['content']
-  llap_cli_log4j2 = config['configurations']['llap-cli-log4j2']['content']
-  hive_log4j2 = config['configurations']['hive-log4j2']['content']
-  hive_exec_log4j2 = config['configurations']['hive-exec-log4j2']['content']
-  beeline_log4j2 = config['configurations']['beeline-log4j2']['content']
-
-  hive_server_interactive_conf_dir = status_params.hive_server_interactive_conf_dir
-  execute_path_hive_interactive = os.environ['PATH'] + os.pathsep + hive_interactive_bin + os.pathsep + hadoop_bin_dir
-  start_hiveserver2_interactive_script = 'startHiveserver2Interactive.sh.j2'
-  start_hiveserver2_interactive_path = format("{tmp_dir}/start_hiveserver2_interactive_script")
-  hive_interactive_env_sh_template = config['configurations']['hive-interactive-env']['content']
-  hive_interactive_enabled = default('/configurations/hive-interactive-env/enable_hive_interactive', False)
-  llap_app_java_opts = default('/configurations/hive-interactive-env/llap_java_opts', '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}')
-
-  # Service check related
-  if hive_transport_mode.lower() == "http":
-    hive_server_interactive_port = config['configurations']['hive-interactive-site']['hive.server2.thrift.http.port']
-  else:
-    hive_server_interactive_port = default('/configurations/hive-interactive-site/hive.server2.thrift.port',"10500")
-  # Tez for Hive interactive related
-  tez_interactive_config_dir = "/etc/tez_hive2/conf"
-  tez_interactive_user = config['configurations']['tez-env']['tez_user']
-  num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
-  # Used in LLAP slider package creation
-  num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
-  llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
-  llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
-  hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
-  llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
-  llap_app_name = config['configurations']['hive-interactive-env']['llap_app_name']
-  hive_llap_principal = None
-  if security_enabled:
-    hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
-    hive_llap_principal = (config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']).replace('_HOST',hostname.lower())
-  pass
-
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = len(ranger_admin_hosts) > 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-
-#ranger hive properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hive'
-
-jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
-common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
-
-repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
-
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
-policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
-
-if security_enabled:
-  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
-  hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
-
-#For curl command in ranger plugin to get db connector
-if has_ranger_admin:
-  enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
-  repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-  ranger_previous_jdbc_jar_name = None
-
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  sql_connector_jar = ''
-
-  hive_ranger_plugin_config = {
-    'username': repo_config_username,
-    'password': repo_config_password,
-    'jdbc.driverClassName': jdbc_driver_class_name,
-    'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
-    'commonNameForCertificate': common_name_for_certificate
-  }
-
-  hive_ranger_plugin_repo = {
-    'isActive': 'true',
-    'config': json.dumps(hive_ranger_plugin_config),
-    'description': 'hive repo',
-    'name': repo_name,
-    'repositoryType': 'hive',
-    'assetType': '3'
-  }
-
-  if stack_supports_ranger_kerberos and security_enabled:
-    hive_ranger_plugin_config['policy.download.auth.users'] = hive_user
-    hive_ranger_plugin_config['tag.download.auth.users'] = hive_user
-    hive_ranger_plugin_config['policy.grantrevoke.auth.users'] = hive_user
-
-  if stack_supports_ranger_kerberos:
-    hive_ranger_plugin_config['ambari.service.check.user'] = policy_user
-
-    hive_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': hive_ranger_plugin_config,
-      'description': 'hive repo',
-      'name': repo_name,
-      'type': 'hive'
-    }
-
-
-  xa_audit_db_is_enabled = False
-  xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-  if xml_configurations_supported and stack_supports_ranger_audit_db:
-    xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
-    xa_audit_db_is_enabled = False
-

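One pattern in params_linux.py worth calling out is the functools.partial wrapper around HdfsResource: the cluster-wide arguments (user, keytab, kinit path, hdfs-site, default FS, and so on) are bound once, so call sites elsewhere in the package only pass the per-directory pieces. A toy illustration of the same technique with a stand-in function (hdfs_resource and HdfsDir are made-up names, not the Ambari API):

import functools

def hdfs_resource(path, user, security_enabled, keytab, action="create_on_execute", **kwargs):
  # stand-in for HdfsResource; the real resource drives the hadoop client
  print("HDFS %s %s as %s (kerberos=%s)" % (action, path, user, security_enabled))

# bind the cluster-wide arguments once, as params_linux.py does with functools.partial
HdfsDir = functools.partial(hdfs_resource,
                            user="hdfs",
                            security_enabled=False,
                            keytab=None)

HdfsDir("/user/hive", mode=0o755)
HdfsDir("/apps/webhcat", mode=0o755)
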
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
deleted file mode 100755
index 880fdb5..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_windows.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from status_params import *
-
-# server configurations
-config = Script.get_config()
-
-# This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-stack_root = None
-hive_conf_dir = None
-hive_home = None
-hive_lib_dir = None
-hive_log_dir = None
-hive_opts = None
-hcat_home = None
-hcat_config_dir = None
-hive_bin = None
-
-try:
-  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
-  hive_conf_dir = os.environ["HIVE_CONF_DIR"]
-  hive_home = os.environ["HIVE_HOME"]
-  hive_lib_dir = os.environ["HIVE_LIB_DIR"]
-  hive_log_dir = os.environ["HIVE_LOG_DIR"]
-  hive_opts = os.environ["HIVE_OPTS"]
-  hcat_home = os.environ["HCAT_HOME"]
-  hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
-  hive_bin = os.path.join(hive_home, "bin")
-except:
-  pass
-
-hive_env_sh_template = config['configurations']['hive-env']['content']
-hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
-hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-hive_user = hadoop_user
-hcat_user = hadoop_user
-
-hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-
-hive_execution_engine = config["configurations"]["hive-site"]["hive.execution.engine"]
-
-######## Metastore Schema
-init_metastore_schema = not config['configurations']['hive-site']['datanucleus.autoCreateSchema']
-
-service_map = {
-  "metastore" : hive_metastore_win_service_name,
-  "client" : hive_client_win_service_name,
-  "hiveserver2" : hive_server_win_service_name,
-  "templeton" : webhcat_server_win_service_name
-}

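A caveat with the try/except block in params_windows.py: all the environment lookups sit in a single try, so the first missing variable silently leaves every later value at None. A hedged alternative sketch that resolves each variable independently (the names mirror the ones above; this is not the shipped code):

import os

def env_or_none(name):
  return os.environ.get(name)   # None when unset, instead of aborting the whole block

hive_home     = env_or_none("HIVE_HOME")
hive_conf_dir = env_or_none("HIVE_CONF_DIR")
hive_lib_dir  = env_or_none("HIVE_LIB_DIR")
hive_log_dir  = env_or_none("HIVE_LOG_DIR")
hcat_home     = env_or_none("HCAT_HOME")
hive_bin      = os.path.join(hive_home, "bin") if hive_home else None
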
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
deleted file mode 100755
index 1836d0f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import socket
-import sys
-import time
-import subprocess
-
-from hcat_service_check import hcat_service_check
-from webhcat_service_check import webhcat_service_check
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.core import shell
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import get_unique_id_and_date
-
-class HiveServiceCheck(Script):
-  pass
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveServiceCheckWindows(HiveServiceCheck):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
-    service = "HIVE"
-    Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
-
-    hcat_service_check()
-    webhcat_service_check()
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveServiceCheckDefault(HiveServiceCheck):
-
-  def __init__(self):
-    super(HiveServiceCheckDefault, self).__init__()
-    Logger.initialize_logger()
-
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
-    else:
-      kinit_cmd = ""
-
-    # Check HiveServer
-    Logger.info("Running Hive Server checks")
-    Logger.info("--------------------------\n")
-    self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts,
-                           int(format("{hive_server_port}")))
-
-
-    if params.has_hive_interactive  and params.hive_interactive_enabled:
-      Logger.info("Running Hive Server2 checks")
-      Logger.info("--------------------------\n")
-
-      self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
-                             int(format("{hive_server_interactive_port}")))
-
-      Logger.info("Running LLAP checks")
-      Logger.info("-------------------\n")
-      self.check_llap(env, kinit_cmd, params.hive_interactive_hosts, int(format("{hive_server_interactive_port}")),
-                      params.hive_llap_principal, params.hive_server2_authentication, params.hive_transport_mode,
-                      params.hive_http_endpoint)
-
-
-    Logger.info("Running HCAT checks")
-    Logger.info("-------------------\n")
-    hcat_service_check()
-
-    Logger.info("Running WEBHCAT checks")
-    Logger.info("---------------------\n")
-    webhcat_service_check()
-
-  def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
-    import params
-    env.set_params(params)
-    Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))
-
-    if not address_list:
-      raise Fail("Can not find any "+server_component_name+" ,host. Please check configuration.")
-
-    SOCKET_WAIT_SECONDS = 290
-
-    start_time = time.time()
-    end_time = start_time + SOCKET_WAIT_SECONDS
-
-    Logger.info("Waiting for the {0} to start...".format(server_component_name))
-
-    workable_server_available = False
-    i = 0
-    while time.time() < end_time and not workable_server_available:
-      address = address_list[i]
-      try:
-        check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
-                               params.hive_server_principal, kinit_cmd, params.smokeuser,
-                               transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
-                               ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
-                               ssl_password=params.hive_ssl_keystore_password)
-        Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
-        workable_server_available = True
-      except:
-        Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
-        time.sleep(5)
-
-      i += 1
-      if i == len(address_list):
-        i = 0
-
-    elapsed_time = time.time() - start_time
-
-    if not workable_server_available:
-      raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
-                 .format(server_component_name, params.hostname, server_port, elapsed_time))
-
-    Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
-                .format(server_component_name, params.hostname, server_port, elapsed_time))
-
-  """
-  Performs Service check for LLAP app
-  """
-  def check_llap(self, env, kinit_cmd, address, port, key, hive_auth="NOSASL", transport_mode="binary", http_endpoint="cliservice"):
-    import params
-    env.set_params(params)
-
-    unique_id = get_unique_id_and_date()
-
-    beeline_url = ['jdbc:hive2://{address}:{port}/', "transportMode={transport_mode}"]
-
-    # Currently, HSI is supported on a single node only. The address list should be of size 1,
-    # thus picking the 1st node value.
-    address = address[0]
-
-    # append url according to used transport
-    if transport_mode == "http":
-      beeline_url.append('httpPath={http_endpoint}')
-
-    # append url according to used auth
-    if hive_auth == "NOSASL":
-      beeline_url.append('auth=noSasl')
-
-    # append url according to principal
-    if kinit_cmd:
-      beeline_url.append('principal={key}')
-
-    exec_path = params.execute_path
-    if params.version and params.stack_root:
-      upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin")
-      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
-
-    # beeline path
-    llap_cmd = "! beeline -u '%s'" % format(";".join(beeline_url))
-    # Append LLAP SQL script path
-    llap_cmd += format(" --hiveconf \"hiveLlapServiceCheck={unique_id}\" -f {stack_root}/current/hive-server2-hive2/scripts/llap/sql/serviceCheckScript.sql")
-    # Append grep patterns for detecting failure
-    llap_cmd += " -e '' 2>&1| awk '{print}'|grep -i -e 'Invalid status\|Invalid URL\|command not found\|Connection refused'"
-
-    Execute(llap_cmd,
-            user=params.hive_user,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
-            tries=1,
-            wait_for_finish=True,
-            stderr=subprocess.PIPE,
-            logoutput=True)
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()
\ No newline at end of file

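For reference, the JDBC URL that check_llap in the deleted service_check.py hands to beeline can be reproduced with plain string assembly; the host, port and principal in the usage note are illustrative placeholders:

def build_llap_jdbc_url(address, port, transport_mode="binary",
                        http_endpoint="cliservice", hive_auth="NOSASL",
                        principal=None):
    # Mirrors the beeline_url list built in check_llap and joined with ";".
    parts = ["jdbc:hive2://{0}:{1}/".format(address, port),
             "transportMode={0}".format(transport_mode)]
    if transport_mode == "http":
        parts.append("httpPath={0}".format(http_endpoint))
    if hive_auth == "NOSASL":
        parts.append("auth=noSasl")
    if principal:
        parts.append("principal={0}".format(principal))
    return ";".join(parts)

# build_llap_jdbc_url("llap-host.example.com", 10500, transport_mode="http")
# -> 'jdbc:hive2://llap-host.example.com:10500/;transportMode=http;httpPath=cliservice;auth=noSasl'
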

[12/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
new file mode 100755
index 0000000..81a4e3e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_hive(upgrade_type = None):
+  import params
+
+  if params.has_ranger_admin:
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Hive: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Hive: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_hive and params.xa_audit_hdfs_is_enabled:
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hiveServer2",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hive_user,
+                         group=params.hive_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('hive-server2', 'hive', params.ranger_previous_jdbc_jar,
+                          params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
+                          params.ranger_driver_curl_target, params.java64_home,
+                          params.repo_name, params.hive_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_hive, conf_dict=params.hive_server_conf_dir,
+                          component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hiveServer2'],
+                          plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
+                          component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.hive_principal if params.security_enabled else None,
+                          component_user_keytab=params.hive_server2_keytab if params.security_enabled else None)
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('hive-server2', 'hive', params.ranger_previous_jdbc_jar,
+                        params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
+                        params.ranger_driver_curl_target, params.java64_home,
+                        params.repo_name, params.hive_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_hive, conf_dict=params.hive_server_conf_dir,
+                        component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hiveServer2'],
+                        plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
+                        component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
+  else:
+    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
new file mode 100755
index 0000000..0b5d5db
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/setup_ranger_hive_interactive.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+
+def setup_ranger_hive_interactive(upgrade_type = None):
+  import params
+
+  if params.has_ranger_admin:
+
+    stack_version = None
+
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Hive2: Setup ranger: command retry enabled thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Hive2: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_hive and params.xa_audit_hdfs_is_enabled:
+      params.HdfsResource("/ranger/audit",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user,
+                         group=params.hdfs_user,
+                         mode=0755,
+                         recursive_chmod=True
+      )
+      params.HdfsResource("/ranger/audit/hive2",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hive_user,
+                         group=params.hive_user,
+                         mode=0700,
+                         recursive_chmod=True
+      )
+      params.HdfsResource(None, action="execute")
+
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('hive-server2-hive2', 'hive', params.ranger_previous_jdbc_jar,
+                          params.ranger_downloaded_custom_connector, params.ranger_driver_curl_source,
+                          params.ranger_driver_curl_target, params.java64_home,
+                          params.repo_name, params.hive_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_hive, conf_dict=params.hive_server_interactive_conf_dir,
+                          component_user=params.hive_user, component_group=params.user_group, cache_service_list=['hive-server2-hive2'],
+                          plugin_audit_properties=params.config['configurations']['ranger-hive-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hive-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-hive-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hive-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hive-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hive-policymgr-ssl'],
+                          component_list=['hive-client', 'hive-metastore', 'hive-server2','hive-server2-hive2'], audit_db_is_enabled=False,
+                          credential_file=params.credential_file, xa_audit_db_password=None,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version='v2',
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.hive_principal if params.security_enabled else None,
+                          component_user_keytab=params.hive_server2_keytab if params.security_enabled else None)
+
+  else:
+    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
new file mode 100755
index 0000000..b7cb148
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'HIVE_CLIENT' : 'hive-client',
+  'HCAT' : 'hive-client',
+  'HIVE_SERVER_INTERACTIVE' : 'hive-server2-hive2'
+}
+
+
+# Either HIVE_METASTORE, HIVE_SERVER, WEBHCAT_SERVER, HIVE_CLIENT, HCAT, HIVE_SERVER_INTERACTIVE
+role = default("/role", None)
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
+component_directory_interactive = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_SERVER_INTERACTIVE")
+
+config = Script.get_config()
+
+stack_root = Script.get_stack_root()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted_major = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  hive_metastore_win_service_name = "metastore"
+  hive_client_win_service_name = "hwi"
+  hive_server_win_service_name = "hiveserver2"
+  webhcat_server_win_service_name = "templeton"
+else:
+  hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
+  hive_pid = 'hive-server.pid'
+  hive_interactive_pid = 'hive-interactive.pid'
+  hive_metastore_pid = 'hive.pid'
+
+  hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
+  webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
+
+  process_name = 'mysqld'
+  if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+  hive_user = config['configurations']['hive-env']['hive_user']
+  webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+  # default configuration directories
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+  hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+  hive_etc_dir_prefix = "/etc/hive"
+  hive_interactive_etc_dir_prefix = "/etc/hive2"
+
+  hive_server_conf_dir = "/etc/hive/conf.server"
+  hive_server_interactive_conf_dir = "/etc/hive2/conf.server"
+
+  webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/conf")
+  hive_home_dir = format("{stack_root}/current/{component_directory}")
+  hive_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+  hive_client_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+  if check_stack_feature(StackFeature.CONFIG_VERSIONING, stack_version_formatted_major):
+    hive_server_conf_dir = format("{stack_root}/current/{component_directory}/conf/conf.server")
+    hive_conf_dir = hive_server_conf_dir
+
+  if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, stack_version_formatted_major):
+    # this is NOT a typo. Configs for hcatalog/webhcat point to a
+    # specific directory which is NOT called 'conf'
+    webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/etc/webhcat")
+
+  # if stack version supports hive serve interactive
+  if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, stack_version_formatted_major):
+    hive_server_interactive_conf_dir = format("{stack_root}/current/{component_directory_interactive}/conf/conf.server")
+
+  hive_config_dir = hive_client_conf_dir
+
+  if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE", "HIVE_SERVER_INTERACTIVE"]:
+    hive_config_dir = hive_server_conf_dir
+    
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

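For reference, the SERVER_ROLE_DIRECTORY_MAP above drives the <stack-root>/current/<component> layout used throughout status_params.py. A minimal sketch of that resolution; the stack root below is an illustrative placeholder, not the value configured by the stack:

import os

SERVER_ROLE_DIRECTORY_MAP = {
    'HIVE_METASTORE': 'hive-metastore',
    'HIVE_SERVER': 'hive-server2',
    'WEBHCAT_SERVER': 'hive-webhcat',
    'HIVE_CLIENT': 'hive-client',
    'HCAT': 'hive-client',
    'HIVE_SERVER_INTERACTIVE': 'hive-server2-hive2',
}

def conf_dir_for_role(role, stack_root="/usr/odpi", default_component="hive-client"):
    # Unknown roles fall back to the client component, matching the default
    # passed to Script.get_component_from_role above.
    component = SERVER_ROLE_DIRECTORY_MAP.get(role, default_component)
    return os.path.join(stack_root, "current", component, "conf")

# conf_dir_for_role("HIVE_SERVER") -> '/usr/odpi/current/hive-server2/conf'
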
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
new file mode 100755
index 0000000..fe3f34a
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat.py
@@ -0,0 +1,145 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+import os.path
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons import OSConst
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def webhcat():
+  import params
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.hcat_config_dir,
+            configurations=params.config['configurations']['webhcat-site']
+  )
+  # Manually overriding service logon user & password set by the installation package
+  ServiceConfig(params.webhcat_server_win_service_name,
+                action="change_user",
+                username = params.hcat_user,
+                password = Script.get_password(params.hcat_user))
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            create_parents = True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            create_parents = True)
+
+  Directory(params.config_dir,
+            create_parents = True,
+            owner=params.webhcat_user,
+            group=params.user_group,
+            cd_access="a")
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  # Replace _HOST with hostname in relevant principal-related properties
+  webhcat_site = params.config['configurations']['webhcat-site'].copy()
+  for prop_name in ['templeton.hive.properties', 'templeton.kerberos.principal']:
+    if prop_name in webhcat_site:
+      webhcat_site[prop_name] = webhcat_site[prop_name].replace("_HOST", params.hostname)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=webhcat_site,
+            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+            )
+
+  # if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
+  if params.stack_version_formatted_major  and check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
+       params.version and params.stack_root:
+    XmlConfig("hive-site.xml",
+      conf_dir = format("{stack_root}/{version}/hive/conf"),
+      configurations = params.config['configurations']['hive-site'],
+      configuration_attributes = params.config['configuration_attributes']['hive-site'],
+      owner = params.hive_user,
+      group = params.user_group,
+      )
+
+    XmlConfig("yarn-site.xml",
+      conf_dir = format("{stack_root}/{version}/hadoop/conf"),
+      configurations = params.config['configurations']['yarn-site'],
+      configuration_attributes = params.config['configuration_attributes']['yarn-site'],
+      owner = params.yarn_user,
+      group = params.user_group,
+      )
+
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.webhcat_env_sh_template)
+  )
+  
+  Directory(params.webhcat_conf_dir,
+       cd_access='a',
+       create_parents = True
+  )
+
+  log4j_webhcat_filename = 'webhcat-log4j.properties'
+  if params.log4j_webhcat_props is not None:
+    File(format("{config_dir}/{log4j_webhcat_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.webhcat_user,
+         content=params.log4j_webhcat_props
+    )
+  elif os.path.exists(format("{config_dir}/{log4j_webhcat_filename}.template")):
+    File(format("{config_dir}/{log4j_webhcat_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.webhcat_user,
+         content=StaticFile(format("{config_dir}/{log4j_webhcat_filename}.template"))
+    )
+
+  # Generate atlas-application.properties.xml file
+  if has_atlas_in_cluster():
+    # WebHCat uses a different config dir than the rest of the daemons in Hive.
+    atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

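For reference, the _HOST substitution that webhcat() applies before writing webhcat-site.xml boils down to a simple string replace; the principal in the usage note is an illustrative placeholder:

def substitute_host(webhcat_site, hostname):
    # Same substitution as above, applied to a copy of the webhcat-site dict.
    out = dict(webhcat_site)
    for prop in ('templeton.hive.properties', 'templeton.kerberos.principal'):
        if prop in out:
            out[prop] = out[prop].replace("_HOST", hostname)
    return out

# substitute_host({'templeton.kerberos.principal': 'HTTP/_HOST@EXAMPLE.COM'}, "webhcat01.example.com")
# -> {'templeton.kerberos.principal': 'HTTP/webhcat01.example.com@EXAMPLE.COM'}
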
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
new file mode 100755
index 0000000..34687c4
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_server.py
@@ -0,0 +1,164 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class WebHCatServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    webhcat_service(action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class WebHCatServerWindows(WebHCatServer):
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.webhcat_server_win_service_name)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class WebHCatServerDefault(WebHCatServer):
+  def get_component_name(self):
+    return "hive-webhcat"
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.webhcat_pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing WebHCat Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
+      conf_select.select(params.stack_name, "hive-hcatalog", params.version)
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hive-webhcat", params.version)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.security_enabled:
+      expectations ={}
+      expectations.update(
+        build_expectations(
+          'webhcat-site',
+          {
+            "templeton.kerberos.secret": "secret"
+          },
+          [
+            "templeton.kerberos.keytab",
+            "templeton.kerberos.principal"
+          ],
+          [
+            "templeton.kerberos.keytab"
+          ]
+        )
+      )
+      expectations.update(
+        build_expectations(
+          'hive-site',
+          {
+            "hive.server2.authentication": "KERBEROS",
+            "hive.metastore.sasl.enabled": "true",
+            "hive.security.authorization.enabled": "true"
+          },
+          None,
+          None
+        )
+      )
+
+      security_params = {}
+      security_params.update(get_params_from_filesystem(status_params.hive_conf_dir,
+                                                        {'hive-site.xml': FILE_TYPE_XML}))
+      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
+                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'webhcat-site' not in security_params \
+            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
+            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.webhcat_user,
+                                security_params['webhcat-site']['templeton.kerberos.keytab'],
+                                security_params['webhcat-site']['templeton.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.hcat_log_dir
+  
+  def get_user(self):
+    import params
+    return params.webhcat_user
+
+if __name__ == "__main__":
+  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
new file mode 100755
index 0000000..c24db4c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service.py
@@ -0,0 +1,96 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.shell import as_user
+from resource_management.core.logger import Logger
+import traceback
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def webhcat_service(action='start', rolling_restart=False):
+  import params
+  if action == 'start' or action == 'stop':
+    Service(params.webhcat_server_win_service_name, action=action)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat_service(action='start', upgrade_type=None):
+  import params
+
+  environ = {
+    'HADOOP_HOME': params.hadoop_home
+  }
+
+  cmd = format('{webhcat_bin_dir}/webhcat_server.sh')
+
+  if action == 'start':
+    if upgrade_type is not None and params.version and params.stack_root:
+      environ['HADOOP_HOME'] = format("{stack_root}/{version}/hadoop")
+
+    daemon_cmd = format('cd {hcat_pid_dir} ; {cmd} start')
+    no_op_test = as_user(format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1'), user=params.webhcat_user)
+    try:
+      Execute(daemon_cmd,
+              user=params.webhcat_user,
+              not_if=no_op_test,
+              environment = environ)
+    except:
+      show_logs(params.hcat_log_dir, params.webhcat_user)
+      raise
+  elif action == 'stop':
+    try:
+      graceful_stop(cmd, environ)
+    except Fail:
+      show_logs(params.hcat_log_dir, params.webhcat_user)
+      Logger.info(traceback.format_exc())
+
+    pid_expression = "`" + as_user(format("cat {webhcat_pid_file}"), user=params.webhcat_user) + "`"
+    process_id_exists_command = format("ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p {pid_expression} >/dev/null 2>&1")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid_expression}")
+    wait_time = 10
+    Execute(daemon_hard_kill_cmd,
+            not_if = format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
+            ignore_failures = True
+    )
+
+    try:
+      # check if stopped the process, else fail the task
+      Execute(format("! ({process_id_exists_command})"),
+              tries=20,
+              try_sleep=3,
+      )
+    except:
+      show_logs(params.hcat_log_dir, params.webhcat_user)
+      raise
+
+    File(params.webhcat_pid_file,
+         action="delete",
+    )
+
+def graceful_stop(cmd, environ):
+  import params
+  daemon_cmd = format('{cmd} stop')
+
+  Execute(daemon_cmd,
+          user = params.webhcat_user,
+          environment = environ)

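For reference, the shell no_op_test used above (ls on the pid file plus ps -p on its contents) can be expressed as a standalone Python check; the pid file path below is an illustrative placeholder:

import os

def webhcat_running(pid_file="/var/run/webhcat/webhcat.pid"):
    # True only if the pid file exists, contains an integer, and that
    # process is visible to the current user.
    if not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0: existence check only, nothing is sent
        return True
    except (ValueError, OSError):
        return False
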
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
new file mode 100755
index 0000000..8e80d48
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/webhcat_service_check.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import urllib2
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+import time
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def webhcat_service_check():
+  Logger.info("Webhcat smoke test - service status")
+
+  import params
+  # AMBARI-11633 [WinTP2] Webhcat service check fails
+  # Hive doesn't pass the environment variables correctly to child processes, which fails the smoke test.
+  # Reducing the amount of URLs checked to the minimum required.
+  #smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+  #service = "WEBHCAT"
+  #Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
+
+  url_tests = [
+    "status",
+    #These are the failing ones:
+    #"ddl/database?user.name=hadoop",
+    #"ddl/database/default/table?user.name=hadoop"
+  ]
+
+
+  import socket
+
+  url_host = socket.getfqdn()
+  url_port = params.config["configurations"]["webhcat-site"]["templeton.port"]
+
+  for url_test in url_tests:
+    url_request = "http://{0}:{1}/templeton/v1/{2}".format(url_host, url_port, url_test)
+    url_response = None
+
+    try:
+      # execute the query for the JSON that includes WebHCat status
+      url_response = urllib2.urlopen(url_request, timeout=30)
+
+      status = url_response.getcode()
+      response = url_response.read()
+
+      if status != 200:
+        Logger.warning("Webhcat service check status: {0}".format(status))
+      Logger.info("Webhcat service check response: {0}".format(response))
+    except urllib2.HTTPError as he:
+      raise Fail("Webhcat check {0} failed: {1}".format(url_request, he.msg))
+    finally:
+      if url_response is not None:
+        try:
+          url_response.close()
+        except:
+          pass
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
+
+  if params.security_enabled:
+    smokeuser_keytab=params.smoke_user_keytab
+    smoke_user_principal=params.smokeuser_principal
+  else:
+    smokeuser_keytab= "no_keytab"
+    smoke_user_principal="no_principal"
+    
+  unique_name = format("{smokeuser}.{timestamp}", timestamp = time.time())
+  templeton_test_script = format("idtest.{unique_name}.pig")
+  templeton_test_input = format("/tmp/idtest.{unique_name}.in")
+  templeton_test_output = format("/tmp/idtest.{unique_name}.out")
+
+  File(format("{tmp_dir}/{templeton_test_script}"),
+       content = Template("templeton_smoke.pig.j2", templeton_test_input=templeton_test_input, templeton_test_output=templeton_test_output),
+       owner=params.hdfs_user
+  )
+  
+  params.HdfsResource(format("/tmp/{templeton_test_script}"),
+                      action = "create_on_execute",
+                      type = "file",
+                      source = format("{tmp_dir}/{templeton_test_script}"),
+                      owner = params.smokeuser
+  )
+  
+  params.HdfsResource(templeton_test_input,
+                      action = "create_on_execute",
+                      type = "file",
+                      source = "/etc/passwd",
+                      owner = params.smokeuser
+  )
+  
+  params.HdfsResource(None, action = "execute")
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {templeton_port} {templeton_test_script} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local} {smoke_user_principal}"
+               " {tmp_dir}")
+
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)
+
+
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
new file mode 100755
index 0000000..e4d88bc
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -0,0 +1,54 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_metric_collector %}
+
+  *.period={{metrics_collection_period}}
+  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+  *.sink.timeline.period={{metrics_collection_period}}
+  *.sink.timeline.sendInterval={{metrics_report_interval}}000
+  *.sink.timeline.slave.host.name = {{hostname}}
+
+  # HTTPS properties
+  *.sink.timeline.truststore.path = {{metric_truststore_path}}
+  *.sink.timeline.truststore.type = {{metric_truststore_type}}
+  *.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+  hivemetastore.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
new file mode 100755
index 0000000..b5c4891
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -0,0 +1,54 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_metric_collector %}
+
+  *.period={{metrics_collection_period}}
+  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+  *.sink.timeline.period={{metrics_collection_period}}
+  *.sink.timeline.sendInterval={{metrics_report_interval}}000
+  *.sink.timeline.slave.host.name = {{hostname}}
+
+  # HTTPS properties
+  *.sink.timeline.truststore.path = {{metric_truststore_path}}
+  *.sink.timeline.truststore.type = {{metric_truststore_type}}
+  *.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+  hiveserver2.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
new file mode 100755
index 0000000..1d75ccf
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -0,0 +1,52 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_metric_collector %}
+
+  *.period={{metrics_collection_period}}
+  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+  *.sink.timeline.period={{metrics_collection_period}}
+  *.sink.timeline.sendInterval={{metrics_report_interval}}000
+
+  # HTTPS properties
+  *.sink.timeline.truststore.path = {{metric_truststore_path}}
+  *.sink.timeline.truststore.type = {{metric_truststore_type}}
+  *.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+  llapdaemon.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
new file mode 100755
index 0000000..5ab787c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -0,0 +1,52 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_metric_collector %}
+
+  *.period={{metrics_collection_period}}
+  *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+  *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+  *.sink.timeline.period={{metrics_collection_period}}
+  *.sink.timeline.sendInterval={{metrics_report_interval}}000
+
+  # HTTPS properties
+  *.sink.timeline.truststore.path = {{metric_truststore_path}}
+  *.sink.timeline.truststore.type = {{metric_truststore_type}}
+  *.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+  llaptaskscheduler.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
new file mode 100755
index 0000000..5af53d0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/hive.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hive_user}}   - nofile {{hive_user_nofile_limit}}
+{{hive_user}}   - nproc  {{hive_user_nproc_limit}}
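The two lines above render into a limits.conf-style fragment that raises the open-file and process limits for the Hive service account. A minimal sketch of the rendered result, assuming placeholder values rather than the cluster's actual hive-env settings:

    from jinja2 import Template

    tmpl = Template("{{hive_user}}   - nofile {{hive_user_nofile_limit}}\n"
                    "{{hive_user}}   - nproc  {{hive_user_nproc_limit}}\n")
    # Placeholder values only, not taken from an actual cluster.
    print(tmpl.render(hive_user="hive",
                      hive_user_nofile_limit=32000,
                      hive_user_nproc_limit=16000))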

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
new file mode 100755
index 0000000..70b418c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2.sh.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+echo $!|cat>$3
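The rendered script takes five positional arguments: a stdout log ($1), a stderr log ($2), a file that receives the daemon PID ($3), the Hive configuration directory ($4, exported as HIVE_CONF_DIR), and the log directory ($5, passed as hive.log.dir). A minimal sketch of an invocation with hypothetical paths (the actual values are computed in Ambari's params and are not shown here):

    import subprocess

    # All paths below are placeholders, not the values Ambari actually uses.
    subprocess.check_call([
        "/bin/bash", "/var/lib/ambari-agent/tmp/start_hiveserver2_script",
        "/var/log/hive/hive-server2.out",   # $1: stdout of hiveserver2
        "/var/log/hive/hive-server2.err",   # $2: stderr of hiveserver2
        "/var/run/hive/hive-server2.pid",   # $3: written with the background PID
        "/etc/hive/conf.server",            # $4: HIVE_CONF_DIR
        "/var/log/hive",                    # $5: hive.log.dir
    ])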

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
new file mode 100755
index 0000000..6062a7e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/startHiveserver2Interactive.sh.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+HIVE_SERVER2_INTERACTIVE_OPTS=" -hiveconf hive.log.file=hiveserver2Interactive.log -hiveconf hive.log.dir=$5"
+HIVE_INTERACTIVE_CONF_DIR=$4 {{hive_interactive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_INTERACTIVE_OPTS} > $1 2> $2 &
+echo $!|cat>$3
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
new file mode 100755
index 0000000..3153e81
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/templates/templeton_smoke.pig.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+A = load '{{templeton_test_input}}' using PigStorage(':');
+B = foreach A generate \$0 as id; 
+store B into '{{templeton_test_output}}';
\ No newline at end of file
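The smoke test above splits a colon-delimited input and keeps only the first field. A rough Python equivalent, for illustration only, assuming a colon-separated local file such as /etc/passwd (the real {{templeton_test_input}} and {{templeton_test_output}} values are supplied by Ambari and are typically HDFS paths):

    # Illustration only: mirrors the Pig smoke test on local files.
    with open("/etc/passwd") as src:                         # stand-in for templeton_test_input
        ids = [line.split(":")[0] for line in src]
    with open("/tmp/templeton_smoke_out.txt", "w") as dst:   # stand-in for templeton_test_output
        dst.write("\n".join(ids) + "\n")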


[42/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
deleted file mode 100755
index 4f53ea9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import glob
-from urlparse import urlparse
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.core.resources.system import File, Execute, Directory
-from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.format import format
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import as_sudo
-from resource_management.core.shell import quote_bash_args
-from resource_management.core.logger import Logger
-from resource_management.core import utils
-from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
-from ambari_commons.constants import SERVICE
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hive(name=None):
-  import params
-
-  XmlConfig("hive-site.xml",
-            conf_dir = params.hive_conf_dir,
-            configurations = params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            configuration_attributes=params.config['configuration_attributes']['hive-site']
-  )
-
-  if name in ["hiveserver2","metastore"]:
-    # Manually overriding service logon user & password set by the installation package
-    service_name = params.service_map[name]
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.hive_user,
-                  password = Script.get_password(params.hive_user))
-    Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
-
-  if name == 'metastore':
-    if params.init_metastore_schema:
-      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
-                                        '-dbType {hive_metastore_db_type} '
-                                        '-userName {hive_metastore_user_name} '
-                                        '-passWord {hive_metastore_user_passwd!p}'
-                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
-                                        hive_bin=params.hive_bin,
-                                        hive_metastore_db_type=params.hive_metastore_db_type,
-                                        hive_metastore_user_name=params.hive_metastore_user_name,
-                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
-      try:
-        Execute(check_schema_created_cmd)
-      except Fail:
-        create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
-                                   '-dbType {hive_metastore_db_type} '
-                                   '-userName {hive_metastore_user_name} '
-                                   '-passWord {hive_metastore_user_passwd!p}',
-                                   hive_bin=params.hive_bin,
-                                   hive_metastore_db_type=params.hive_metastore_db_type,
-                                   hive_metastore_user_name=params.hive_metastore_user_name,
-                                   hive_metastore_user_passwd=params.hive_metastore_user_passwd)
-        Execute(create_schema_cmd,
-                user = params.hive_user,
-                logoutput=True
-        )
-
-  if name == "hiveserver2":
-    if params.hive_execution_engine == "tez":
-      # Init the tez app dir in hadoop
-      script_file = __file__.replace('/', os.sep)
-      cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
-
-      Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
-
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hive(name=None):
-  import params
-
-  if name == 'hiveserver2':
-    # If the copy-tarball-to-HDFS feature is not supported, create the WebHCat apps dir explicitly
-    if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):  
-      params.HdfsResource(params.webhcat_apps_dir,
-                            type="directory",
-                            action="create_on_execute",
-                            owner=params.webhcat_user,
-                            mode=0755
-                          )
-    
-    # Create webhcat dirs.
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      params.HdfsResource(params.hcat_hdfs_user_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           owner=params.hcat_user,
-                           mode=params.hcat_hdfs_user_mode
-      )
-
-    params.HdfsResource(params.webhcat_hdfs_user_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.webhcat_user,
-                         mode=params.webhcat_hdfs_user_mode
-    )
-
-    # ****** Begin Copy Tarballs ******
-    # *********************************
-    # If the copy-tarball-to-HDFS feature is supported, copy mapreduce.tar.gz and tez.tar.gz to HDFS
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-
-    # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
-    # This can use a different source and dest location, supplied via the custom_source_file and custom_dest_file arguments.
-    copy_to_hdfs("pig",
-                 params.user_group,
-                 params.hdfs_user,
-                 file_mode=params.tarballs_mode,
-                 custom_source_file=params.pig_tar_source,
-                 custom_dest_file=params.pig_tar_dest_file,
-                 host_sys_prepped=params.host_sys_prepped)
-    copy_to_hdfs("hive",
-                 params.user_group,
-                 params.hdfs_user,
-                 file_mode=params.tarballs_mode,
-                 custom_source_file=params.hive_tar_source,
-                 custom_dest_file=params.hive_tar_dest_file,
-                 host_sys_prepped=params.host_sys_prepped)
-
-    wildcard_tarballs = ["sqoop", "hadoop_streaming"]
-    for tarball_name in wildcard_tarballs:
-      source_file_pattern = eval("params." + tarball_name + "_tar_source")
-      dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
-
-      if source_file_pattern is None or dest_dir is None:
-        continue
-
-      source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
-      for source_file in source_files:
-        src_filename = os.path.basename(source_file)
-        dest_file = os.path.join(dest_dir, src_filename)
-
-        copy_to_hdfs(tarball_name,
-                     params.user_group,
-                     params.hdfs_user,
-                     file_mode=params.tarballs_mode,
-                     custom_source_file=source_file,
-                     custom_dest_file=dest_file,
-                     host_sys_prepped=params.host_sys_prepped)
-    # ******* End Copy Tarballs *******
-    # *********************************
-    
-    # if warehouse directory is in DFS
-    if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
-      # Create Hive Metastore Warehouse Dir
-      params.HdfsResource(params.hive_apps_whs_dir,
-                           type="directory",
-                            action="create_on_execute",
-                            owner=params.hive_user,
-                            mode=0777
-      )
-    else:
-      Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
-
-    # Create Hive User Dir
-    params.HdfsResource(params.hive_hdfs_user_dir,
-                         type="directory",
-                          action="create_on_execute",
-                          owner=params.hive_user,
-                          mode=params.hive_hdfs_user_mode
-    )
-    
-    if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
-      params.HdfsResource(params.hive_exec_scratchdir,
-                           type="directory",
-                           action="create_on_execute",
-                           owner=params.hive_user,
-                           group=params.hdfs_user,
-                           mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
-      
-    params.HdfsResource(None, action="execute")
-
-  Directory(params.hive_etc_dir_prefix,
-            mode=0755
-  )
-
-  # We should change configurations for the client as well as for the server.
-  # The reason is that stale configs are tracked at the service level, not at the component level.
-  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
-  for conf_dir in params.hive_conf_dirs_list:
-    fill_conf_dir(conf_dir)
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
-            configurations=params.hive_site_config,
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
-    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
-    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
-  
-  if name == 'hiveserver2':
-    XmlConfig("hiveserver2-site.xml",
-              conf_dir=params.hive_server_conf_dir,
-              configurations=params.config['configurations']['hiveserver2-site'],
-              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0644)
-
-  if params.hive_metastore_site_supported and name == 'metastore':
-    XmlConfig("hivemetastore-site.xml",
-              conf_dir=params.hive_server_conf_dir,
-              configurations=params.config['configurations']['hivemetastore-site'],
-              configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0644)
-  
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-  )
-
-  # On some OSes this folder may not exist, so create it before placing files there
-  Directory(params.limits_conf_dir,
-            create_parents = True,
-            owner='root',
-            group='root'
-            )
-
-  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hive.conf.j2")
-       )
-
-  if name == 'metastore' or name == 'hiveserver2':
-    if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
-      jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
-    if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
-      jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
-
-  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
-       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
-       mode = 0644,
-  )
-
-  if name == 'metastore':
-    File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
-         owner=params.hive_user,
-         group=params.user_group,
-         content=Template("hadoop-metrics2-hivemetastore.properties.j2")
-    )
-
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-    if params.init_metastore_schema:
-      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                 "{hive_schematool_bin}/schematool -initSchema "
-                                 "-dbType {hive_metastore_db_type} "
-                                 "-userName {hive_metastore_user_name} "
-                                 "-passWord {hive_metastore_user_passwd!p} -verbose")
-
-      check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                        "{hive_schematool_bin}/schematool -info "
-                                        "-dbType {hive_metastore_db_type} "
-                                        "-userName {hive_metastore_user_name} "
-                                        "-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
-
-      # HACK: in cases with quoted passwords and as_user (which does the quoting as well) !p won't work for hiding passwords.
-      # Fixing it with the hack below:
-      quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
-      if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
-          or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
-        quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
-      Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
-          format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
-
-      Execute(create_schema_cmd,
-              not_if = check_schema_created_cmd,
-              user = params.hive_user
-      )
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=Template(format('{start_hiveserver2_script}'))
-    )
-
-    File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
-         owner=params.hive_user,
-         group=params.user_group,
-         content=Template("hadoop-metrics2-hiveserver2.properties.j2")
-    )
-
-  if name != "client":
-    Directory(params.hive_pid_dir,
-              create_parents = True,
-              cd_access='a',
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0755)
-    Directory(params.hive_log_dir,
-              create_parents = True,
-              cd_access='a',
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0755)
-    Directory(params.hive_var_lib,
-              create_parents = True,
-              cd_access='a',
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0755)
-
-"""
-Writes configuration files required by Hive.
-"""
-def fill_conf_dir(component_conf_dir):
-  import params
-
-  Directory(component_conf_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            create_parents = True
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-
-  File(format("{component_conf_dir}/hive-default.xml.template"),
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-  File(format("{component_conf_dir}/hive-env.sh.template"),
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-  # Create hive-log4j.properties and hive-exec-log4j.properties
-  # in /etc/hive/conf and not in /etc/hive2/conf
-  if params.log4j_version == '1':
-    log4j_exec_filename = 'hive-exec-log4j.properties'
-    if (params.log4j_exec_props != None):
-      File(format("{component_conf_dir}/{log4j_exec_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=params.log4j_exec_props
-      )
-    elif (os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template"))):
-      File(format("{component_conf_dir}/{log4j_exec_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
-      )
-
-    log4j_filename = 'hive-log4j.properties'
-    if (params.log4j_props != None):
-      File(format("{component_conf_dir}/{log4j_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=params.log4j_props
-      )
-    elif (os.path.exists(format("{component_conf_dir}/{log4j_filename}.template"))):
-      File(format("{component_conf_dir}/{log4j_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
-      )
-    pass # if params.log4j_version == '1'
-
-
-def jdbc_connector(target, hive_previous_jdbc_jar):
-  """
-  Shared by Hive Batch, Hive Metastore, and Hive Interactive
-  :param target: Target path of the JDBC jar, which could belong to any of the components above.
-  """
-  import params
-
-  if not params.jdbc_jar_name:
-    return
-
-  if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
-    environment = {
-      "no_proxy": format("{ambari_server_hostname}")
-    }
-
-    if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
-      File(hive_previous_jdbc_jar, action='delete')
-
-    # TODO: remove this once ranger_hive_plugin no longer provides the JDBC jar
-    if params.prepackaged_jdbc_name != params.jdbc_jar_name:
-      Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
-              path=["/bin", "/usr/bin/"],
-              sudo = True)
-    
-    File(params.downloaded_custom_connector,
-         content = DownloadSource(params.driver_curl_source))
-
-    # It may be more correct to branch on the db type here
-    if params.sqla_db_used:
-      untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
-
-      Execute(untar_sqla_type2_driver, sudo = True)
-
-      Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
-
-      Directory(params.jdbc_libs_dir,
-                create_parents = True)
-
-      Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
-
-      Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
-
-    else:
-      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
-            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the JDBC jar
-            path=["/bin", "/usr/bin/"],
-            sudo = True)
-
-  else:
-    # for the default Hive DB (MySQL)
-    Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
-            #creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the JDBC jar
-            path=["/bin", "/usr/bin/"],
-            sudo=True
-    )
-  pass
-
-  File(target,
-       mode = 0644,
-  )
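For reference, the metastore branch above builds its schematool command lines with format(). A minimal sketch of what the -initSchema command expands to, using placeholder paths, db type, and credentials rather than values from a real cluster:

    # Placeholders only; the real values come from Ambari's params.py.
    hive_server_conf_dir = "/etc/hive/conf.server"
    hive_schematool_bin = "/usr/lib/hive/bin"

    create_schema_cmd = (
        "export HIVE_CONF_DIR=%s ; "
        "%s/schematool -initSchema -dbType mysql "
        "-userName hive -passWord ***** -verbose"
        % (hive_server_conf_dir, hive_schematool_bin))
    print(create_schema_cmd)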

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
deleted file mode 100755
index 3d9bfd7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from hive import hive
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-
-class HiveClient(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-    self.configure(env)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hive(name='client')
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveClientWindows(HiveClient):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveClientDefault(HiveClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Hive client Stack Upgrade pre-restart")
-
-    import params
-    env.set_params(params)
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-client", params.version)
-
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
deleted file mode 100755
index 74c67fc..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_interactive.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-# Python Imports
-import os
-import glob
-from urlparse import urlparse
-
-# Resource Management and Common Imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.service import ServiceConfig
-from resource_management.core.resources.system import File, Execute, Directory
-from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.functions.format import format
-from resource_management.core.exceptions import Fail
-from resource_management.core.shell import as_sudo
-from resource_management.core.shell import quote_bash_args
-from resource_management.core.logger import Logger
-from resource_management.core import utils
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-from ambari_commons import OSConst
-from hive import fill_conf_dir, jdbc_connector
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def hive_interactive(name=None):
-  pass
-
-"""
-Sets up the configs, jdbc connection and tarball copy to HDFS for Hive Server Interactive.
-"""
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def hive_interactive(name=None):
-  import params
-
-  # list of properties that should be excluded from the config
-  # this approach is a compromise against adding a dedicated config
-  # type for hive_server_interactive or needed config groups on a
-  # per component basis
-  exclude_list = ['hive.enforce.bucketing',
-                  'hive.enforce.sorting']
-
-  # List of configs to be excluded from hive2 client, but present in Hive2 server.
-  exclude_list_for_hive2_client = ['javax.jdo.option.ConnectionPassword']
-
-  # Copy Tarballs in HDFS.
-  if params.stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted_major):
-    resource_created = copy_to_hdfs("tez_hive2",
-                 params.user_group,
-                 params.hdfs_user,
-                 file_mode=params.tarballs_mode,
-                 host_sys_prepped=params.host_sys_prepped)
-
-    if resource_created:
-      params.HdfsResource(None, action="execute")
-
-  Directory(params.hive_interactive_etc_dir_prefix,
-            mode=0755
-            )
-
-  Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
-  for conf_dir in params.hive_conf_dirs_list:
-    fill_conf_dir(conf_dir)
-
-  '''
-  As hive2/hive-site.xml only contains the new + the changed props compared to hive/hive-site.xml,
-  we need to merge hive/hive-site.xml and hive2/hive-site.xml and store it in hive2/hive-site.xml.
-  '''
-  merged_hive_interactive_site = {}
-  merged_hive_interactive_site.update(params.config['configurations']['hive-site'])
-  merged_hive_interactive_site.update(params.config['configurations']['hive-interactive-site'])
-  for item in exclude_list:
-    if item in merged_hive_interactive_site.keys():
-      del merged_hive_interactive_site[item]
-
-  '''
-  Hive2 doesn't support Atlas, so we need to remove the hook 'org.apache.atlas.hive.hook.HiveHook',
-  which would have come into the 'hive.exec.post.hooks' config during the site merge logic if Atlas is installed.
-  '''
-  remove_atlas_hook_if_exists(merged_hive_interactive_site)
-
-  '''
-  As tez_hive2/tez-site.xml only contains the new + the changed props compared to tez/tez-site.xml,
-  we need to merge tez/tez-site.xml and tez_hive2/tez-site.xml and store it in tez_hive2/tez-site.xml.
-  '''
-  merged_tez_interactive_site = {}
-  if 'tez-site' in params.config['configurations']:
-    merged_tez_interactive_site.update(params.config['configurations']['tez-site'])
-    Logger.info("Retrieved 'tez/tez-site' for merging with 'tez_hive2/tez-interactive-site'.")
-  else:
-    Logger.error("Tez's 'tez-site' couldn't be retrieved from passed-in configurations.")
-
-  merged_tez_interactive_site.update(params.config['configurations']['tez-interactive-site'])
-  XmlConfig("tez-site.xml",
-            conf_dir = params.tez_interactive_config_dir,
-            configurations = merged_tez_interactive_site,
-            configuration_attributes=params.config['configuration_attributes']['tez-interactive-site'],
-            owner = params.tez_interactive_user,
-            group = params.user_group,
-            mode = 0664)
-
-  '''
-  Merge properties from hiveserver2-interactive-site into hiveserver2-site
-  '''
-  merged_hiveserver2_interactive_site = {}
-  if 'hiveserver2-site' in params.config['configurations']:
-    merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-site'])
-    Logger.info("Retrieved 'hiveserver2-site' for merging with 'hiveserver2-interactive-site'.")
-  else:
-    Logger.error("'hiveserver2-site' couldn't be retrieved from passed-in configurations.")
-  merged_hiveserver2_interactive_site.update(params.config['configurations']['hiveserver2-interactive-site'])
-
-
-  # Create config files under /etc/hive2/conf and /etc/hive2/conf/conf.server:
-  #   hive-site.xml
-  #   hive-env.sh
-  #   llap-daemon-log4j2.properties
-  #   llap-cli-log4j2.properties
-  #   hive-log4j2.properties
-  #   hive-exec-log4j2.properties
-  #   beeline-log4j2.properties
-
-  hive2_conf_dirs_list = params.hive_conf_dirs_list
-  hive2_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
-
-  # Make a copy of 'merged_hive_interactive_site' in 'merged_hive_interactive_site_copy' and delete the
-  # 'javax.jdo.option.ConnectionPassword' config from it, as the Hive2 client shouldn't carry that config.
-  merged_hive_interactive_site_copy = merged_hive_interactive_site.copy()
-  for item in exclude_list_for_hive2_client:
-    if item in merged_hive_interactive_site.keys():
-      del merged_hive_interactive_site_copy[item]
-
-  for conf_dir in hive2_conf_dirs_list:
-      if conf_dir == hive2_client_conf_path:
-        XmlConfig("hive-site.xml",
-                  conf_dir=conf_dir,
-                  configurations=merged_hive_interactive_site_copy,
-                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
-                  owner=params.hive_user,
-                  group=params.user_group,
-                  mode=0644)
-      else:
-        XmlConfig("hive-site.xml",
-                  conf_dir=conf_dir,
-                  configurations=merged_hive_interactive_site,
-                  configuration_attributes=params.config['configuration_attributes']['hive-interactive-site'],
-                  owner=params.hive_user,
-                  group=params.user_group,
-                  mode=0644)
-
-      XmlConfig("hiveserver2-site.xml",
-                conf_dir=conf_dir,
-                configurations=merged_hiveserver2_interactive_site,
-                configuration_attributes=params.config['configuration_attributes']['hiveserver2-interactive-site'],
-                owner=params.hive_user,
-                group=params.user_group,
-                mode=0644)
-
-      hive_server_interactive_conf_dir = conf_dir
-
-      File(format("{hive_server_interactive_conf_dir}/hive-env.sh"),
-           owner=params.hive_user,
-           group=params.user_group,
-           content=InlineTemplate(params.hive_interactive_env_sh_template))
-
-      llap_daemon_log4j_filename = 'llap-daemon-log4j2.properties'
-      File(format("{hive_server_interactive_conf_dir}/{llap_daemon_log4j_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=params.llap_daemon_log4j)
-
-      llap_cli_log4j2_filename = 'llap-cli-log4j2.properties'
-      File(format("{hive_server_interactive_conf_dir}/{llap_cli_log4j2_filename}"),
-           mode=0644,
-           group=params.user_group,
-           owner=params.hive_user,
-           content=params.llap_cli_log4j2)
-
-      hive_log4j2_filename = 'hive-log4j2.properties'
-      File(format("{hive_server_interactive_conf_dir}/{hive_log4j2_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.hive_log4j2)
-
-      hive_exec_log4j2_filename = 'hive-exec-log4j2.properties'
-      File(format("{hive_server_interactive_conf_dir}/{hive_exec_log4j2_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.hive_exec_log4j2)
-
-      beeline_log4j2_filename = 'beeline-log4j2.properties'
-      File(format("{hive_server_interactive_conf_dir}/{beeline_log4j2_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.beeline_log4j2)
-
-      File(os.path.join(hive_server_interactive_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
-           owner=params.hive_user,
-           group=params.user_group,
-           content=Template("hadoop-metrics2-hiveserver2.properties.j2")
-           )
-
-      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llapdaemon.properties"),
-           owner=params.hive_user,
-           group=params.user_group,
-           content=Template("hadoop-metrics2-llapdaemon.j2"))
-
-      File(format("{hive_server_interactive_conf_dir}/hadoop-metrics2-llaptaskscheduler.properties"),
-           owner=params.hive_user,
-           group=params.user_group,
-           content=Template("hadoop-metrics2-llaptaskscheduler.j2"))
-
-
-  # On some OS this folder could be not exists, so we will create it before pushing there files
-  Directory(params.limits_conf_dir,
-            create_parents = True,
-            owner='root',
-            group='root')
-
-  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hive.conf.j2"))
-
-  if not os.path.exists(params.target_hive_interactive):
-    jdbc_connector(params.target_hive_interactive, params.hive_intaractive_previous_jdbc_jar)
-
-  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
-       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
-       mode = 0644)
-  File(params.start_hiveserver2_interactive_path,
-       mode=0755,
-       content=Template(format('{start_hiveserver2_interactive_script}')))
-
-  Directory(params.hive_pid_dir,
-            create_parents=True,
-            cd_access='a',
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-  Directory(params.hive_log_dir,
-            create_parents=True,
-            cd_access='a',
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-  Directory(params.hive_interactive_var_lib,
-            create_parents=True,
-            cd_access='a',
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-"""
-Remove the 'org.apache.atlas.hive.hook.HiveHook' value from the Hive2/hive-site.xml config 'hive.exec.post.hooks', if it exists.
-"""
-def remove_atlas_hook_if_exists(merged_hive_interactive_site):
-  if 'hive.exec.post.hooks' in merged_hive_interactive_site.keys():
-    existing_hive_exec_post_hooks = merged_hive_interactive_site.get('hive.exec.post.hooks')
-    if existing_hive_exec_post_hooks:
-      hook_splits = existing_hive_exec_post_hooks.split(",")
-      updated_hook_splits = [hook for hook in hook_splits if not hook.strip() == 'org.apache.atlas.hive.hook.HiveHook']
-      updated_hooks_str = ",".join((str(hook)).strip() for hook in updated_hook_splits)
-      if updated_hooks_str != existing_hive_exec_post_hooks:
-        merged_hive_interactive_site['hive.exec.post.hooks'] = updated_hooks_str
-        Logger.info("Updated Hive2/hive-site.xml 'hive.exec.post.hooks' value from : '{0}' to : '{1}'"
-                    .format(existing_hive_exec_post_hooks, updated_hooks_str))
-      else:
-        Logger.info("No change done to Hive2/hive-site.xml 'hive.exec.post.hooks' value.")
-  else:
-      Logger.debug("'hive.exec.post.hooks' doesn't exist in Hive2/hive-site.xml")
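remove_atlas_hook_if_exists above filters one class name out of a comma-separated hook list. A self-contained sketch of the same string handling, traced on an illustrative value (not taken from a real cluster):

    ATLAS_HOOK = 'org.apache.atlas.hive.hook.HiveHook'
    hooks = 'org.apache.hadoop.hive.ql.hooks.ATSHook, ' + ATLAS_HOOK

    # Split on commas, drop the Atlas hook, and re-join the stripped entries,
    # just as the function above does for 'hive.exec.post.hooks'.
    kept = [h.strip() for h in hooks.split(',') if h.strip() != ATLAS_HOOK]
    print(','.join(kept))   # org.apache.hadoop.hive.ql.hooks.ATSHook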

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100755
index 17bf581..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute, Directory
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.constants import Direction
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations
-from resource_management.libraries.functions.security_commons import cached_kinit_executor
-from resource_management.libraries.functions.security_commons import get_params_from_filesystem
-from resource_management.libraries.functions.security_commons import validate_security_config_properties
-from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
-from resource_management.core.resources.system import File
-
-from hive import hive
-from hive import jdbc_connector
-from hive_service import hive_service
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-# the legacy conf.server location in previous stack versions
-LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
-
-class HiveMetastore(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    # Writing configurations on start is required for security
-    self.configure(env)
-
-    hive_service('metastore', action='start', upgrade_type=upgrade_type)
-
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    hive_service('metastore', action='stop', upgrade_type=upgrade_type)
-
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hive(name = 'metastore')
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveMetastoreWindows(HiveMetastore):
-  def status(self, env):
-    import status_params
-    from resource_management.libraries.functions import check_windows_service_status
-    check_windows_service_status(status_params.hive_metastore_win_service_name)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveMetastoreDefault(HiveMetastore):
-  def get_component_name(self):
-    return "hive-metastore"
-
-
-  def status(self, env):
-    import status_params
-    from resource_management.libraries.functions import check_process_status
-
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check the Hive Metastore pid file
-    check_process_status(pid_file)
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Metastore Stack Upgrade pre-restart")
-    import params
-
-    env.set_params(params)
-
-    is_upgrade = params.upgrade_direction == Direction.UPGRADE
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
-      stack_select.select("hive-metastore", params.version)
-
-    if is_upgrade and params.stack_version_formatted_major and \
-            check_stack_feature(StackFeature.HIVE_METASTORE_UPGRADE_SCHEMA, params.stack_version_formatted_major):
-      self.upgrade_schema(env)
-
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.metastore.kerberos.keytab.file",
-                           "hive.metastore.kerberos.principal"]
-
-      props_read_check = ["hive.metastore.kerberos.keytab.file"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
-            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
-                                security_params['hive-site']['hive.metastore.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
-  def upgrade_schema(self, env):
-    """
-    Executes the schema upgrade binary. This is its own function because it could
-    be called as a standalone task from the upgrade pack, but it is safe to run for each
-    metastore instance. The schema upgrade on an already upgraded metastore is a NOOP.
-
-    The metastore schema upgrade requires a database driver library for most
-    databases. During an upgrade, it's possible that the library is not present,
-    so this will also attempt to copy/download the appropriate driver.
-
-    This function will also ensure that configurations are written out to disk before running
-    since the new configs will most likely not yet exist on an upgrade.
-
-    Should not be invoked for a DOWNGRADE; Metastore only supports schema upgrades.
-    """
-    Logger.info("Upgrading Hive Metastore Schema")
-    import status_params
-    import params
-    env.set_params(params)
-
-    # ensure that configurations are written out before trying to upgrade the schema
-    # since the schematool needs configs and doesn't know how to use the hive conf override
-    self.configure(env)
-
-    if params.security_enabled:
-      cached_kinit_executor(status_params.kinit_path_local,
-        status_params.hive_user,
-        params.hive_metastore_keytab_path,
-        params.hive_metastore_principal,
-        status_params.hostname,
-        status_params.tmp_dir)
-      
-    # ensure that the JDBC driver is present for the schema tool; if it's not
-    # present, then download it first
-    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list:
-      target_directory = format("{stack_root}/{version}/hive/lib")
-
-      # download it if it does not exist
-      if not os.path.exists(params.source_jdbc_file):
-        jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
-
-      target_directory_and_filename = os.path.join(target_directory, os.path.basename(params.source_jdbc_file))
-
-      if params.sqla_db_used:
-        target_native_libs_directory = format("{target_directory}/native/lib64")
-
-        Execute(format("yes | {sudo} cp {jars_in_hive_lib} {target_directory}"))
-
-        Directory(target_native_libs_directory, create_parents = True)
-
-        Execute(format("yes | {sudo} cp {libs_in_hive_lib} {target_native_libs_directory}"))
-
-        Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
-      else:
-        # copy the JDBC driver from the older metastore location to the new location only
-        # if it does not already exist
-        if not os.path.exists(target_directory_and_filename):
-          Execute(('cp', params.source_jdbc_file, target_directory),
-            path=["/bin", "/usr/bin/"], sudo = True)
-
-      File(target_directory_and_filename, mode = 0644)
-
-    # build the schema tool command
-    binary = format("{hive_schematool_ver_bin}/schematool")
-
-    # the conf.server directory changed locations between stack versions
-    # since the configurations have not been written out yet during an upgrade
-    # we need to choose the original legacy location
-    schematool_hive_server_conf_dir = params.hive_server_conf_dir
-    if params.current_version is not None:
-      current_version = format_stack_version(params.current_version)
-      if not(check_stack_feature(StackFeature.CONFIG_VERSIONING, current_version)):
-        schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
-
-    env_dict = {
-      'HIVE_CONF_DIR': schematool_hive_server_conf_dir
-    }
-
-    command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
-    Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
-    
-  def get_log_folder(self):
-    import params
-    return params.hive_log_dir
-
-  def get_user(self):
-    import params
-    return params.hive_user
-
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

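For reference, upgrade_schema() above reduces to four steps: write the configurations out, kinit if security is enabled, make sure a JDBC driver is available to the schema tool, and run schematool with HIVE_CONF_DIR pointing at the correct conf.server directory. A minimal standalone sketch of that final step follows; the function name and the example paths/db type are illustrative assumptions, while the -dbType/-upgradeSchema flags and the HIVE_CONF_DIR variable come from the script above.

    import os
    import subprocess

    def run_schematool(schematool_bin, conf_dir, db_type):
        """Run Hive schematool to upgrade the metastore schema; the call is a
        no-op when the schema is already at the target version."""
        env = os.environ.copy()
        env["HIVE_CONF_DIR"] = conf_dir  # schematool reads hive-site.xml from this directory
        subprocess.check_call([schematool_bin, "-dbType", db_type, "-upgradeSchema"], env=env)

    # Example with hypothetical paths:
    # run_schematool("/usr/odpi/current/hive-metastore/bin/schematool",
    #                "/etc/hive/conf.server", "mysql")
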
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
deleted file mode 100755
index 31b083b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.get_stack_version import get_stack_version
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from ambari_commons import OSCheck, OSConst
-if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
-from setup_ranger_hive import setup_ranger_hive
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons.constants import UPGRADE_TYPE_ROLLING
-from resource_management.core.logger import Logger
-
-import hive_server_upgrade
-from hive import hive
-from hive_service import hive_service
-
-
-class HiveServer(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hive(name='hiveserver2')
-
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveServerWindows(HiveServer):
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service('hiveserver2', action='start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    hive_service('hiveserver2', action='stop')
-
-  def status(self, env):
-    import status_params
-    check_windows_service_status(status_params.hive_server_win_service_name)
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveServerDefault(HiveServer):
-  def get_component_name(self):
-    return "hive-server2"
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-
-    setup_ranger_hive(upgrade_type=upgrade_type)
-    hive_service('hiveserver2', action = 'start', upgrade_type=upgrade_type)
-
-    # only perform this if upgrading and rolling; a non-rolling upgrade doesn't need
-    # to do this since hive is already down
-    if upgrade_type == UPGRADE_TYPE_ROLLING:
-      hive_server_upgrade.post_upgrade_deregister()
-
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    # During a rolling upgrade, HiveServer2 should not be stopped before the new server is available.
-    # Once the new server is started, the old one is stopped by the --deregister command, which is
-    # invoked by the 'hive_server_upgrade.post_upgrade_deregister()' method.
-    if upgrade_type != UPGRADE_TYPE_ROLLING:
-      hive_service( 'hiveserver2', action = 'stop' )
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-
-    # Check the HiveServer2 PID file to verify the process is running
-    check_process_status(pid_file)
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Hive Server Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
-      stack_select.select("hive-server2", params.version)
-
-      # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
-      resource_created = copy_to_hdfs(
-        "mapreduce",
-        params.user_group,
-        params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped)
-
-      resource_created = copy_to_hdfs(
-        "tez",
-        params.user_group,
-        params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped) or resource_created
-
-      if resource_created:
-        params.HdfsResource(None, action="execute")
-
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"hive.server2.authentication": "KERBEROS",
-                           "hive.metastore.sasl.enabled": "true",
-                           "hive.security.authorization.enabled": "true"}
-      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                           "hive.server2.authentication.kerberos.principal",
-                           "hive.server2.authentication.spnego.principal",
-                           "hive.server2.authentication.spnego.keytab"]
-
-      props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                          "hive.server2.authentication.spnego.keytab"]
-      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                            props_read_check)
-
-      hive_expectations ={}
-      hive_expectations.update(hive_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
-                                                   {'hive-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, hive_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if 'hive-site' not in security_params \
-            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
-            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hive_user,
-                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-  def get_log_folder(self):
-    import params
-    return params.hive_log_dir
-  
-  def get_user(self):
-    import params
-    return params.hive_user
-
-if __name__ == "__main__":
-  HiveServer().execute()

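As a side note, the security_status() checks above all follow the same pattern: validate a handful of hive-site properties, then attempt a kinit with the configured keytab and principal. A simplified, library-free sketch of the validation half is shown below; the helper name is hypothetical, and it stands in for the Ambari build_expectations/validate_security_config_properties helpers rather than reproducing them. The property names and expected values are the ones used in the script above.

    def validate_hive_site_security(props):
        """Return a list of problems found in the Kerberos-related hive-site properties."""
        value_checks = {"hive.server2.authentication": "KERBEROS",
                        "hive.metastore.sasl.enabled": "true",
                        "hive.security.authorization.enabled": "true"}
        non_empty = ["hive.server2.authentication.kerberos.keytab",
                     "hive.server2.authentication.kerberos.principal",
                     "hive.server2.authentication.spnego.principal",
                     "hive.server2.authentication.spnego.keytab"]
        issues = []
        for key, expected in value_checks.items():
            if props.get(key) != expected:
                issues.append("%s should be %s but is %s" % (key, expected, props.get(key)))
        for key in non_empty:
            if not props.get(key):
                issues.append("%s is not set" % key)
        return issues  # an empty list means the configuration passed validation
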
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
deleted file mode 100755
index 2df001c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hive_server_interactive.py
+++ /dev/null
@@ -1,535 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-# Python Imports
-import subprocess
-import os
-import re
-import time
-import shutil
-from datetime import datetime
-import json
-
-# Ambari Commons & Resource Management imports
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.core.source import InlineTemplate
-from resource_management.core.resources.system import Execute
-
-# Imports needed for Rolling/Express Upgrade
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-
-from resource_management.core import shell
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from ambari_commons import OSCheck, OSConst
-from ambari_commons.os_family_impl import OsFamilyImpl
-
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-
-# Local Imports
-from setup_ranger_hive import setup_ranger_hive
-from hive_service_interactive import hive_service_interactive
-from hive_interactive import hive_interactive
-from hive_server import HiveServerDefault
-from setup_ranger_hive_interactive import setup_ranger_hive_interactive
-
-import traceback
-
-class HiveServerInteractive(Script):
-  pass
-
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveServerInteractiveDefault(HiveServerInteractive):
-
-    def get_component_name(self):
-      return "hive-server2-hive2"
-
-    def install(self, env):
-      import params
-      self.install_packages(env)
-
-    def configure(self, env):
-      import params
-      env.set_params(params)
-      hive_interactive(name='hiveserver2')
-
-    def pre_upgrade_restart(self, env, upgrade_type=None):
-      Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
-      import params
-      env.set_params(params)
-
-      if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-        stack_select.select("hive-server2-hive2", params.version)
-        conf_select.select(params.stack_name, "hive2", params.version)
-
-        # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
-        resource_created = copy_to_hdfs(
-          "hive2",
-          params.user_group,
-          params.hdfs_user,
-          host_sys_prepped=params.host_sys_prepped)
-
-        resource_created = copy_to_hdfs(
-          "tez_hive2",
-          params.user_group,
-          params.hdfs_user,
-          host_sys_prepped=params.host_sys_prepped) or resource_created
-
-        if resource_created:
-          params.HdfsResource(None, action="execute")
-
-    def start(self, env, upgrade_type=None):
-      import params
-      env.set_params(params)
-      self.configure(env)
-
-      if params.security_enabled:
-        # Do the security setup, internally calls do_kinit()
-        self.setup_security()
-
-      # TODO : We need to have a conditional [re]start of LLAP once the "status check command" for LLAP is ready.
-      # Check status and based on that decide on [re]starting.
-
-      # Start LLAP before Hive Server Interactive start.
-      status = self._llap_start(env)
-      if not status:
-        raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.")
-
-      # TODO : test the workability of Ranger and Hive2 during upgrade
-      setup_ranger_hive_interactive(upgrade_type=upgrade_type)
-      hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
-
-
-    def stop(self, env, upgrade_type=None):
-      import params
-      env.set_params(params)
-
-      if params.security_enabled:
-        self.do_kinit()
-
-      # Stop Hive Interactive Server first
-      hive_service_interactive('hiveserver2', action='stop')
-
-      self._llap_stop(env)
-
-    def status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      # We do not perform the 'llap' status check here as part of the 'HSI' status check,
-      # since the 'llap' status check is a heavyweight operation.
-
-      pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
-      # Check the Hive Server Interactive PID file to verify the process is running
-      check_process_status(pid_file)
-
-    def security_status(self, env):
-      import status_params
-      env.set_params(status_params)
-
-      if status_params.security_enabled:
-        props_value_check = {"hive.server2.authentication": "KERBEROS",
-                             "hive.metastore.sasl.enabled": "true",
-                             "hive.security.authorization.enabled": "true"}
-        props_empty_check = ["hive.server2.authentication.kerberos.keytab",
-                             "hive.server2.authentication.kerberos.principal",
-                             "hive.server2.authentication.spnego.principal",
-                             "hive.server2.authentication.spnego.keytab"]
-
-        props_read_check = ["hive.server2.authentication.kerberos.keytab",
-                            "hive.server2.authentication.spnego.keytab"]
-        hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
-                                             props_read_check)
-
-        hive_expectations ={}
-        hive_expectations.update(hive_site_props)
-
-        security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
-                                                     {'hive-site.xml': FILE_TYPE_XML})
-        result_issues = validate_security_config_properties(security_params, hive_expectations)
-        if not result_issues: # If all validations passed successfully
-          try:
-            # Double check the dict before calling execute
-            if 'hive-site' not in security_params \
-              or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
-              or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
-              self.put_structured_out({"securityState": "UNSECURED"})
-              self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
-              return
-
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hive_user,
-                                  security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
-                                  security_params['hive-site']['hive.server2.authentication.spnego.principal'],
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          issues = []
-          for cf in result_issues:
-            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        self.put_structured_out({"securityState": "UNSECURED"})
-
-    def restart_llap(self, env):
-      """
-      Custom command to Restart LLAP
-      """
-      Logger.info("Custom Command to retart LLAP")
-      import params
-      env.set_params(params)
-
-      if params.security_enabled:
-        self.do_kinit()
-
-      self._llap_stop(env)
-      self._llap_start(env)
-
-    def _llap_stop(self, env):
-      import params
-      Logger.info("Stopping LLAP")
-      SLIDER_APP_NAME = "llap0"
-
-      stop_cmd = ["slider", "stop", SLIDER_APP_NAME]
-
-      code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
-      if code == 0:
-        Logger.info(format("Stopped {SLIDER_APP_NAME} application on Slider successfully"))
-      elif code == 69 and output is not None and "Unknown application instance" in output:
-        Logger.info(format("Application {SLIDER_APP_NAME} was already stopped on Slider"))
-      else:
-        raise Fail(format("Could not stop application {SLIDER_APP_NAME} on Slider. {error}\n{output}"))
-
-      # Will exit with code 4 if it needs to be run with "--force" to delete directories and registries.
-      Execute(('slider', 'destroy', SLIDER_APP_NAME, "--force"),
-              user=params.hive_user,
-              timeout=30,
-              ignore_failures=True,
-      )
-
-    """
-    Controls the start of LLAP.
-    """
-    def _llap_start(self, env, cleanup=False):
-      import params
-      env.set_params(params)
-      Logger.info("Starting LLAP")
-      LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()
-      LLAP_APP_NAME = 'llap0'
-
-      unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
-
-      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances {params.num_llap_nodes}"
-                   " --slider-am-container-mb {params.slider_am_container_mb} --size {params.llap_daemon_container_size}m "
-                   " --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m --loglevel {params.llap_log_level}"
-                   " --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")
-      if params.security_enabled:
-        llap_keytab_splits = params.hive_llap_keytab_file.split("/")
-        Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
-        cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
-                      "{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")
-
-      # Append args.
-      llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
-      cmd += format(" --args \" {llap_java_args}\"")
-
-      run_file_path = None
-      try:
-        Logger.info(format("Command: {cmd}"))
-        code, output, error = shell.checked_call(cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
-
-        if code != 0 or output is None:
-          raise Fail("Command failed with either non-zero return code or no output.")
-
-        # E.g., output:
-        # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
-        exp = r"Prepared (.*?run.sh) for running LLAP"
-        run_file_path = None
-        out_splits = output.split("\n")
-        for line in out_splits:
-          line = line.strip()
-          m = re.match(exp, line, re.I)
-          if m and len(m.groups()) == 1:
-            run_file_name = m.group(1)
-            run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
-            break
-        if not run_file_path:
-          raise Fail("Did not find run.sh file in output: " + str(output))
-
-        Logger.info(format("Run file path: {run_file_path}"))
-        Execute(run_file_path, user=params.hive_user)
-        Logger.info("Submitted LLAP app name : {0}".format(LLAP_APP_NAME))
-
-        # We need to check the status of the LLAP app to figure out whether it got
-        # launched properly and is in a running state. Only then go ahead with the Hive Server Interactive start.
-        status = self.check_llap_app_status(LLAP_APP_NAME, params.num_retries_for_checking_llap_status)
-        if status:
-          Logger.info("LLAP app '{0}' deployed successfully.".format(LLAP_APP_NAME))
-          return True
-        else:
-          Logger.error("LLAP app '{0}' deployment unsuccessful.".format(LLAP_APP_NAME))
-          return False
-      except:
-        # Attempt to clean up the packaged application directory on failure
-        if run_file_path is not None and cleanup:
-          try:
-            parent_dir = os.path.dirname(run_file_path)
-            if os.path.isdir(parent_dir):
-              shutil.rmtree(parent_dir)
-          except Exception, e:
-            Logger.error("Could not cleanup LLAP app package. Error: " + str(e))
-
-        # throw the original exception
-        raise
-
-    """
-    Does kinit and copies keytab for Hive/LLAP to HDFS.
-    """
-    def setup_security(self):
-      import params
-
-      self.do_kinit()
-
-      # Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
-      slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
-      Execute(slider_keytab_install_cmd, user=params.hive_user)
-
-    def do_kinit(self):
-      import params
-
-      hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
-      Execute(hive_interactive_kinit_cmd, user=params.hive_user)
-
-      llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
-      Execute(llap_kinit_cmd, user=params.hive_user)
-
-    """
-    Get llap app status data.
-    """
-    def _get_llap_app_status_info(self, app_name):
-      import status_params
-      LLAP_APP_STATUS_CMD_TIMEOUT = 0
-
-      llap_status_cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
-      code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE,
-                                               logoutput=False)
-      Logger.info("Received 'llapstatus' command 'output' : {0}".format(output))
-      return self._make_valid_json(output)
-
-
-    """
-    Remove extra lines from the 'llapstatus' output (e.g. lines added by MOTD logging) so that
-    valid JSON data can be passed to the JSON converter.
-    """
-    def _make_valid_json(self, output):
-      '''
-
-      Note: It is assumed right now that extra lines will be only at the start and not at the end.
-
-      Sample expected JSON to be passed for 'loads' is either of the form :
-
-      Case 'A':
-      {
-          "amInfo" : {
-          "appName" : "llap0",
-          "appType" : "org-apache-slider",
-          "appId" : "APP1",
-          "containerId" : "container_1466036628595_0010_01_000001",
-          "hostname" : "hostName",
-          "amWebUrl" : "http://hostName:port/"
-        },
-        "state" : "LAUNCHING",
-        ....
-        "desiredInstances" : 1,
-        "liveInstances" : 0,
-        ....
-        ....
-      }
-
-      or
-
-      Case 'B':
-      {
-        "state" : "APP_NOT_FOUND"
-      }
-
-      '''
-      splits = output.split("\n")
-
-      len_splits = len(splits)
-      if (len_splits < 3):
-        raise Fail ("Malformed JSON data received from 'llapstatus' command. Exiting ....")
-
-      marker_idx = None # Detects where to start reading the JSON data from
-      for idx, split in enumerate(splits):
-        curr_elem = split.strip()
-        if idx+2 > len_splits:
-          raise Fail("Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
-        next_elem = (splits[(idx + 1)]).strip()
-        if curr_elem == "{":
-          if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}':
-            # For Case 'A'
-            marker_idx = idx
-            break;
-          elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
-              # For Case 'B'
-              marker_idx = idx
-              break;
-
-      Logger.info("Marker index for start of JSON data for 'llapsrtatus' comamnd : {0}".format(marker_idx))
-
-      # Remove extra logging from possible JSON output
-      if marker_idx is None:
-        raise Fail("Couldn't validate the received output for JSON parsing.")
-      else:
-        if marker_idx != 0:
-          del splits[0:marker_idx]
-          Logger.info("Removed lines: '1-{0}' from the received 'llapstatus' output to make it valid for JSON parsing.".format(marker_idx))
-
-      scanned_output = '\n'.join(splits)
-      llap_app_info = json.loads(scanned_output)
-      return llap_app_info
-
-
-    """
-    Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'.
-
-    If the app is in the 'APP_NOT_FOUND', 'RUNNING_PARTIAL' or 'LAUNCHING' state:
-       retry up to 'num_retries' times, waiting for the app to reach either (1) 'RUNNING_ALL' or
-       (2) 'RUNNING_PARTIAL' with 80% or more of 'desiredInstances' running, and return True.
-    else:
-       return False.
-
-    Parameters: llap_app_name : deployed llap app name.
-                num_retries :   Number of retries to check the LLAP app status.
-    """
-    def check_llap_app_status(self, llap_app_name, num_retries):
-      # Record the start time so the retry loop can report how long we waited.
-      curr_time = time.time()
-
-      if num_retries <= 0:
-        num_retries = 2
-      if num_retries > 20:
-        num_retries = 20
-      @retry(times=num_retries, sleep_time=2, err_class=Fail)
-      def do_retries():
-        live_instances = 0
-        desired_instances = 0
-
-        percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state.
-        llap_app_info = self._get_llap_app_status_info(llap_app_name)
-        if llap_app_info is None or 'state' not in llap_app_info:
-          Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
-          return False
-
-        if llap_app_info['state'].upper() == 'RUNNING_ALL':
-          Logger.info(
-            "LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
-          return True
-        elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL':
-          # Check how many instances were up.
-          if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info:
-            live_instances = llap_app_info['liveInstances']
-            desired_instances = llap_app_info['desiredInstances']
-          else:
-            Logger.info(
-              "LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \
-              "Exiting ....".format(llap_app_name, llap_app_info['state']))
-            Logger.info(llap_app_info)
-            return False
-          if desired_instances == 0:
-            Logger.info("LLAP app '{0}' desired instance are set to 0. Exiting ....".format(llap_app_name))
-            return False
-
-          percentInstancesUp = 0
-          if live_instances > 0:
-            percentInstancesUp = float(live_instances) / desired_instances * 100
-          if percentInstancesUp >= percent_desired_instances_to_be_up:
-            Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'  >= {3}% of Desired Instances : " \
-                        "'{4}'.".format(llap_app_name, llap_app_info['state'],
-                                       llap_app_info['liveInstances'],
-                                       percent_desired_instances_to_be_up,
-                                       llap_app_info['desiredInstances']))
-            return True
-          else:
-            Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \
-                        "'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'],
-                                                       llap_app_info['liveInstances'],
-                                                       llap_app_info['desiredInstances'],
-                                                       time.time() - curr_time))
-            raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'],
-                                                                                                           llap_app_info['desiredInstances']))
-        elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']:
-          status_str = format("LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state']))
-          Logger.info(status_str)
-          raise Fail(status_str)
-        else:  # Covers any unknown that we get.
-          Logger.info(
-            "LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING'.".format(llap_app_name, llap_app_info['state']))
-          return False
-
-      try:
-        status = do_retries()
-        return status
-      except Exception, e:
-        Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name,
-                                                                                          time.time() - curr_time))
-        traceback.print_exc()
-        return False
-
-    def get_log_folder(self):
-      import params
-      return params.hive_log_dir
-
-    def get_user(self):
-      import params
-      return params.hive_user
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveServerInteractiveWindows(HiveServerInteractive):
-
-  def status(self, env):
-    pass
-
-if __name__ == "__main__":
-  HiveServerInteractive().execute()
\ No newline at end of file

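One detail worth calling out from hive_server_interactive.py above: in the RUNNING_PARTIAL branch of check_llap_app_status(), the app is accepted once at least 80% of the desired LLAP instances are live. A standalone sketch of that test is shown below; the function name is illustrative, while the 80% threshold and the live/desired ratio come from the script above.

    def llap_partial_state_ok(live_instances, desired_instances, threshold_pct=80):
        """Return True when enough LLAP daemons are up to treat RUNNING_PARTIAL as healthy."""
        if desired_instances <= 0:
            return False  # nothing was requested, so "partially running" cannot be healthy
        percent_up = float(live_instances) / desired_instances * 100
        return percent_up >= threshold_pct

    # Examples: 4 of 5 live -> 80.0% -> True; 3 of 5 live -> 60.0% -> False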

[05/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
new file mode 100755
index 0000000..a66bb34
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/YARN_metrics.json
@@ -0,0 +1,3486 @@
+{
+  "NODEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logError": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedGB": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedVCores": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLocalDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLogDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/ContainersFailed": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedContainers": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "RESOURCEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "yarn.ClusterMetrics.NumLostNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "yarn.ClusterMetrics.NumActiveNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveApplications": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveApplications",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveUsers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveUsers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersAllocated": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersAllocated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersReleased": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersReleased",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayAvgTime": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayNumOps": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_0": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_0",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_1440": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_1440",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_300": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_300",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_60": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_60",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/ThreadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/NodeHeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+          

<TRUNCATED>

[10/52] bigtop git commit: ODPI-5. Integrate Ambari packaging into Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
new file mode 100755
index 0000000..fc2c61f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,318 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+
+config = Script.get_config()
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
+
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = '/usr'
+create_lib_snappy_symlinks = True
+
+# HDP 2.2+ params
+if Script.is_stack_greater_or_equal("2.2"):
+  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+  hadoop_home = stack_select.get_hadoop_dir("home")
+  create_lib_snappy_symlinks = False
+  
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+# Logic to create the /tmp and /user directories for the HCFS stack.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+ dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+ dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+ if dfs_ha_namenode_ids_array_len > 1:
+   dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+ for nn_id in dfs_ha_namemodes_ids_list:
+   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+   if hostname in nn_host:
+     namenode_id = nn_id
+     namenode_rpc = nn_host
+   pass
+ pass
+else:
+ namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+
+if namenode_rpc:
+ nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+
+if dfs_ha_enabled:
+ dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+ dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+ nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+ nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = False if nn_rpc_client_port is None else True
+is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
+is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
+
+##### end #####
+
+import functools
+# Create a partial function with the arguments that are common to every HdfsResource call;
+# to create/delete/copy-from-local HDFS directories and files, code calls params.HdfsResource.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
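
The functools.partial wrapper at the end of params.py exists so that the service scripts only have to pass the path, type and action of each HDFS object; the cluster-wide settings (user, keytab, hadoop_conf_dir, and so on) are bound once here. A minimal sketch of the same pattern, using stand-in names rather than the real resource_management classes:

    import functools

    def hdfs_resource(path, action, user=None, conf_dir=None, security_enabled=False):
        # Stand-in for the resource_management HdfsResource provider: just
        # report what a real call would operate on.
        print("%s %s (user=%s, conf=%s, kerberos=%s)"
              % (action, path, user, conf_dir, security_enabled))

    # Bind the arguments that are identical for every call, exactly as
    # params.py does for the real HdfsResource class.
    HdfsResource = functools.partial(
        hdfs_resource,
        user="hdfs",
        conf_dir="/etc/hadoop/conf",
        security_enabled=False,
    )

    # Call sites then look like the ones in the service scripts.
    HdfsResource("/tmp", action="create_on_execute")
    HdfsResource("/user/ambari-qa", action="create_on_execute")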

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100755
index 0000000..548f051
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100755
index 0000000..ba9c8fb
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,175 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Set up Hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders.
+    if params.host_sys_prepped:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # For the source code of this jar, see contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=Template("hadoop-metrics2.properties.j2")
+      )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+       create_dirs()
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+   import params
+   params.HdfsResource(params.hdfs_tmp_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777
+   )
+   params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode
+   )
+   params.HdfsResource(None,
+                      action="execute"
+   )
+
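
create_dirs() above relies on the deferred-execution behaviour of HdfsResource: calls with action="create_on_execute" only queue work, and the trailing HdfsResource(None, action="execute") applies the whole batch (over WebHDFS, or via fast-hdfs-resource.jar when WebHDFS is not available). A rough sketch of that queue-then-flush pattern, with placeholder names rather than the real provider:

    # Placeholder illustration only; not the resource_management implementation.
    class DeferredHdfsResource(object):
        _pending = []

        def __init__(self, path, action, **kwargs):
            if action == "create_on_execute":
                # Only record the request; nothing touches HDFS yet.
                DeferredHdfsResource._pending.append((path, kwargs))
            elif action == "execute":
                # A real provider would issue WebHDFS calls or run
                # fast-hdfs-resource.jar here; we just report the batch.
                for queued_path, queued_kwargs in DeferredHdfsResource._pending:
                    print("creating %s with %s" % (queued_path, queued_kwargs))
                DeferredHdfsResource._pending = []

    DeferredHdfsResource("/tmp", action="create_on_execute", owner="hdfs", mode=0o777)
    DeferredHdfsResource("/user/ambari-qa", action="create_on_execute", owner="ambari-qa")
    DeferredHdfsResource(None, action="execute")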

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100755
index 0000000..2197ba5
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..1adba80
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100755
index 0000000..fcd9b23
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,104 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name = {{hostname}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
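
The {{...}} placeholders in this template are filled from the values computed in before-START/scripts/params.py. A small illustration of the rendering step, using Jinja2 directly (the stack's Template class wraps Jinja2, but the snippet below is only a sketch with made-up values, not the real code path):

    from jinja2 import Template

    snippet = Template(
        "*.period={{ metrics_collection_period }}\n"
        "{% if has_metric_collector %}"
        "namenode.sink.timeline.collector="
        "{{ metric_collector_protocol }}://{{ metric_collector_host }}:{{ metric_collector_port }}\n"
        "{% endif %}"
    )

    # Example values; in the stack they come from ams-site and cluster-env.
    print(snippet.render(
        metrics_collection_period=10,
        has_metric_collector=True,
        metric_collector_protocol="http",
        metric_collector_host="ams-collector.example.com",
        metric_collector_port="6188",
    ))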

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
new file mode 100755
index 0000000..0a03d17
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100755
index 0000000..4a9e713
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2 b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100755
index 0000000..15034d6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
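
The rendered topology_mappings.data is an INI-style file whose [network_topology] section maps both the hostnames and the IPv4 addresses of slave nodes to their racks; the packaged topology_script.py (not part of this hunk) reads it to answer rack-awareness queries from HDFS and YARN. A minimal resolver over the same format, written here only to illustrate the file layout, with a hypothetical default rack:

    try:
        from configparser import ConfigParser   # Python 3
    except ImportError:
        from ConfigParser import ConfigParser   # Python 2

    DEFAULT_RACK = "/default-rack"

    def resolve(names, mapping_file="/etc/hadoop/conf/topology_mappings.data"):
        parser = ConfigParser()
        parser.read(mapping_file)
        mapping = dict(parser.items("network_topology"))
        # Hosts or IPs missing from the mapping fall back to the default rack.
        return [mapping.get(name, DEFAULT_RACK) for name in names]

    if __name__ == "__main__":
        print(" ".join(resolve(["host1.example.com", "10.0.0.12"])))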

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
new file mode 100755
index 0000000..ca45822
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <active>true</active>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
new file mode 100755
index 0000000..8c838db
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_features.json
@@ -0,0 +1,308 @@
+{
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "2.2.1.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "2.1.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "tez_for_spark",
+      "description": "Tez dependency for Spark",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "2.4.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "storm_kerberos",
+      "description": "Storm Kerberos support (AMBARI-7570)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "storm_ams",
+      "description": "Storm AMS integration (AMBARI-10710)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "pig_on_tez",
+      "description": "Pig on Tez support (AMBARI-7863)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "2.2.0.0",
+      "max_version": "2.5.0.0"
+    },
+    {
+      "name": "accumulo_kerberos_user_auth",
+      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "2.3.8.0"
+    },
+    {
+      "name": "atlas_rolling_upgrade",
+      "description": "Rolling upgrade support for Atlas",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_create_hive_tez_configs",
+      "description": "Oozie creates configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to share the Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "falcon_extensions",
+      "description": "Falcon Extension",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "2.5.0.0"
+     },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "2.3.0.0"
+     },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "min_version": "2.2.3.0",
+      "max_version": "2.2.5.0"
+     },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger support for log4j properties (AMBARI-15681)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_install_infra_client",
+      "description": "Ambari Infra Service support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "falcon_atlas_support_2_3",
+      "description": "Falcon Atlas integration support for 2.3 stack",
+      "min_version": "2.3.99.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "falcon_atlas_support",
+      "description": "Falcon Atlas integration",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "HBase home directory in HDFS needed for HBase backup",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as a slave component of Spark",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_ranger_plugin_support",
+      "description": "Atlas Ranger plugin support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_conf_dir_in_path",
+      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+      "min_version": "2.3.0.0",
+      "max_version": "2.4.99.99"
+    },
+    {
+      "name": "atlas_upgrade_support",
+      "description": "Atlas supports express and rolling upgrades",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_hook_support",
+      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service supports pid generation (AMBARI-16756)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service supports pid generation",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_admin_password_change",
+      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "storm_metrics_apache_classes",
+      "description": "Metrics sink for Storm that uses Apache class names",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_java_opts_support",
+      "description": "Allow Spark to generate java-opts file",
+      "min_version": "2.2.0.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "atlas_hbase_setup",
+      "description": "Use script to create Atlas tables in HBase and set permissions for the Atlas user.",
+      "min_version": "2.5.0.0"
+    }
+  ]
+}
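
As an illustration of how records like these are typically consumed (a sketch, not Ambari's own helper): a feature applies when the stack version is at or above min_version and, when a max_version is present, below it. The file path and example versions are assumptions.

    # Sketch: gate behaviour on a stack feature using the JSON above.
    import json

    def version_tuple(version):
        return tuple(int(part) for part in version.split("."))

    def feature_supported(features, name, stack_version):
        current = version_tuple(stack_version)
        for feature in features["stack_features"]:
            if feature["name"] != name:
                continue
            if "min_version" in feature and current < version_tuple(feature["min_version"]):
                return False
            if "max_version" in feature and current >= version_tuple(feature["max_version"]):
                return False
            return True
        return False

    with open("stack_features.json") as handle:
        features = json.load(handle)

    print(feature_supported(features, "rolling_upgrade", "2.4.2.0"))  # True: at or above 2.2.0.0
    print(feature_supported(features, "snappy", "2.4.2.0"))           # False: capped at 2.2.0.0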

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
new file mode 100755
index 0000000..d1aab4b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file
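
Each entry pairs what appears to be the tool name, its binary path, and the owning package. A minimal sketch of unpacking one entry (file name assumed for illustration):

    import json

    with open("stack_tools.json") as handle:
        tools = json.load(handle)

    selector_name, selector_path, selector_package = tools["stack_selector"]
    print("%s %s %s" % (selector_name, selector_path, selector_package))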

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
new file mode 100755
index 0000000..ab4f25f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/repos/repoinfo.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://repo.odpi.org/ODPi/1.0/centos-6/</baseurl>
+      <repoid>ODPi-1.0</repoid>
+      <reponame>ODPi</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://repo.odpi.org/ODPi/1.0/ubuntu-14.04/apt</baseurl>
+      <repoid>ODPi-1.0</repoid>
+      <reponame>odpi</reponame>
+    </repo>
+  </os>
+</reposinfo>
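
For illustration, a sketch of how the redhat6 repo definition above maps onto a yum-style repo stanza. The input/output file names and the gpgcheck setting are assumptions, not something this patch generates.

    # Sketch: render the redhat6 <repo> element as a yum repo stanza.
    import xml.etree.ElementTree as ET

    root = ET.parse("repoinfo.xml").getroot()
    for os_elem in root.findall("os"):
        if os_elem.get("family") != "redhat6":
            continue
        repo = os_elem.find("repo")
        print("[%s]" % repo.findtext("repoid"))
        print("name=%s" % repo.findtext("reponame"))
        print("baseurl=%s" % repo.findtext("baseurl"))
        print("gpgcheck=0")  # assumption: no GPG key is declared in repoinfo.xml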

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
new file mode 100755
index 0000000..ab56c7f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/role_command_order.json
@@ -0,0 +1,41 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP"],
+    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
+
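
To make the record format concrete (a sketch, not the Ambari scheduler): each blocked role-command lists the blocker role-commands that must finish first. Which optional sections apply depends on cluster topology; the sketch below merges every section purely for illustration, and the file name is assumed.

    # Sketch: collect blocker commands per blocked command from the JSON above.
    import json

    with open("role_command_order.json") as handle:
        order = json.load(handle)

    deps = {}
    for section, entries in order.items():
        if not isinstance(entries, dict):
            continue  # skip the top-level "_comment" strings
        for blocked, blockers in entries.items():
            if blocked.startswith("_comment"):
                continue
            deps.setdefault(blocked, []).extend(blockers)

    # e.g. ['NAMENODE-START', 'DATANODE-START', 'ZOOKEEPER_SERVER-START']
    # when every section is merged as above.
    print(deps["RESOURCEMANAGER-START"])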

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
new file mode 100755
index 0000000..d6e30b7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HDFS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1+odpi</version>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
new file mode 100755
index 0000000..6458e29
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.1+odpi</version>
+      <extends>common-services/YARN/2.1.0.2.0</extends>
+    </service>
+    <service>
+      <name>MAPREDUCE2</name>
+      <version>2.7.1+odpi</version>
+      <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
new file mode 100755
index 0000000..0a89dc2
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6+odpi</version>
+      <extends>common-services/ZOOKEEPER/3.4.5</extends>
+    </service>
+  </services>
+</metainfo>


[23/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
new file mode 100755
index 0000000..27d3541
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-interactive-site.xml
@@ -0,0 +1,909 @@
+<configuration><property require-input="false">
+    <name>hive.server2.thrift.port</name>
+    <value>10500</value>
+    <description>
+      TCP port number to listen on, default 10500.
+    </description>
+    <display-name>HiveServer2 Port</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.thrift.http.port</name>
+    <value>10501</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>
+      The maximum number of queries the Hive Interactive cluster will be able to handle concurrently.
+    </description>
+    <display-name>Maximum Total Concurrent Queries</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <maximum>10</maximum>
+        <minimum>1</minimum>
+        <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.llap.daemon.num.executors</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_heap_size</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.yarn.container.mb</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.io.memory.size</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>num_llap_nodes</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>slider_am_container_mb</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.metastore.uris</name>
+    <value></value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.enable.doAs</name>
+    <value>false</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.prewarm.enabled</name>
+    <value>false</value>
+    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Hold Containers to Reduce Latency</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of the reduce-side of
+      query execution.
+    </description>
+    <display-name>Enable Reduce Vectorization</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>
+      A list of comma separated values corresponding to YARN queues of the same name.
+      When HiveServer2 is launched in Tez mode, this configuration needs to be set
+      for multiple Tez sessions to run in parallel on the cluster.
+    </description>
+    <display-name>Default query queues</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>combo</type>
+        <entries>
+            <entry>
+                <value>default</value>
+                <label>Default</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1+</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>true</value>
+    <description>
+      This flag is used in HiveServer2 to enable a user to use HiveServer2 without
+      turning on Tez for HiveServer2. The user could potentially want to run queries
+      over Tez without the pool of sessions.
+    </description>
+    <display-name>Start Tez session at Initialization</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.driver.parallel.compilation</name>
+    <value>true</value>
+    <description>
+      This flag allows HiveServer2 to compile queries in parallel.
+    </description>
+    <display-name>Compile queries in parallel</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.webui.port</name>
+    <value>10502</value>
+    <description>Web UI port address</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.webui.use.ssl</name>
+    <value>false</value>
+    <description>Enable SSL for HiveServer2 Interactive</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2-hive2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.queue.name</name>
+    <value>default</value>
+    <description>Choose the YARN queue in this cluster that is dedicated to interactive query.</description>
+    <display-name>Interactive Query Queue</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>combo</type>
+        <entries>
+            <entry>
+                <value>default</value>
+                <label>Default</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.server2.tez.default.queues</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.num.executors</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>llap_heap_size</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.daemon.yarn.container.mb</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>hive.llap.io.memory.size</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>num_llap_nodes</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+        <dependedByProperties>
+            <name>slider_am_container_mb</name>
+            <type>hive-interactive-env</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.llap.daemon.yarn.shuffle.port</name>
+    <value>15551</value>
+    <description>YARN shuffle port for LLAP-daemon-hosted shuffle.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.execution.engine</name>
+    <value>tez</value>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.execution.mode</name>
+    <value>llap</value>
+    <description>Chooses whether query fragments will run in container or in llap</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.execution.mode</name>
+    <value>all</value>
+    <description>Chooses which fragments of a query will run in llap</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.io.enabled</name>
+    <value>true</value>
+    <description>Whether the LLAP IO layer is enabled.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive.llap.io.memory.size</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.io.use.lrfu</name>
+    <value>true</value>
+    <description>Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO).</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.auto.allow.uber</name>
+    <value>false</value>
+    <description>Whether or not to allow the planner to run vertices in the AM.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.object.cache.enabled</name>
+    <value>true</value>
+    <description>Cache objects (plans, hashtables, etc) in llap</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.input.generate.consistent.splits</name>
+    <value>true</value>
+    <description>Whether to generate consistent split locations when generating splits in the AM</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.client.consistent.splits</name>
+    <value>true</value>
+    <description>
+      Whether to setup split locations to match nodes on which llap daemons are running,
+      instead of using the locations provided by the split itself.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.task.scheduler.locality.delay</name>
+    <value>-1</value>
+    <description>
+      Amount of time to wait before allocating a request which contains location information,
+      to a location other than the ones requested. Set to -1 for an infinite delay, 0
+      for no delay.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.exec.orc.split.strategy</name>
+    <value>HYBRID</value>
+    <description>
+      This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation
+      as opposed to query execution (split generation does not read or cache file footers).
+      ETL strategy is used when spending little more time in split generation is acceptable
+      (split generation reads and caches file footers). HYBRID chooses between the above strategies
+      based on heuristics.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.service.hosts</name>
+    <value>@llap0</value>
+    <description>
+      Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,
+      YARN registry is used.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.allow.permanent.fns</name>
+    <value>false</value>
+    <description>Whether LLAP daemon should localize the resources for permanent UDFs.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.io.memory.size</name>
+    <value>0</value>
+    <description>The amount of memory reserved for Hive's optimized in-memory cache.</description>
+    <display-name>In-Memory Cache per Daemon</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <unit>MB</unit>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-site</type>
+        </property>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.llap.io.enabled</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.llap.daemon.num.executors</name>
+    <value>1</value>
+    <description>The maximum number of CPUs a single LLAP daemon will use. Usually this should be equal to the number of available CPUs.</description>
+    <display-name>Maximum CPUs per Daemon</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>tez.am.resource.memory.mb</name>
+            <type>tez-site</type>
+        </property>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by>
+        <dependedByProperties>
+            <name>hive.llap.io.threadpool.size</name>
+            <type>hive-interactive-site</type>
+        </dependedByProperties>
+    </property_depended_by>
+</property><property require-input="false">
+    <name>hive.llap.daemon.vcpus.per.instance</name>
+    <value>${hive.llap.daemon.num.executors}</value>
+    <description>The total number of vcpus to use for the executors inside LLAP.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.yarn.container.mb</name>
+    <value>341</value>
+    <description>Total memory used by individual LLAP daemons. This includes memory for the cache as well as for the query execution.</description>
+    <display-name>Memory per daemon</display-name>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+        <unit>MB</unit>
+        <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap.shuffle.connection-keep-alive.enable</name>
+    <value>true</value>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>llap.shuffle.connection-keep-alive.timeout</name>
+    <value>60</value>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.io.threadpool.size</name>
+    <value>2</value>
+    <description>Specify the number of threads to use for low-level IO thread pool.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive.llap.daemon.num.executors</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.rpc.port</name>
+    <value>15001</value>
+    <description>The LLAP daemon RPC port.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.management.rpc.port</name>
+    <value>15004</value>
+    <description>RPC port for LLAP daemon management service.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.daemon.task.scheduler.enable.preemption</name>
+    <value>true</value>
+    <description>hive.llap.daemon.task.scheduler.enable.preemption</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.exec.print.summary</name>
+    <value>true</value>
+    <description>Display breakdown of execution steps, for every query executed by the shell.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.vectorized.execution.mapjoin.native.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable native (i.e. non-pass through) vectorization
+      of queries using MapJoin.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.vectorized.execution.mapjoin.minmax.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vector map join hash tables to
+      use min / max filtering for integer join queries using MapJoin.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable use of native fast vector map join hash tables in
+      queries using MapJoin.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.mapjoin.hybridgrace.hashtable</name>
+    <value>false</value>
+    <description>Whether to use hybrid grace hash join as the join method for mapjoin. Tez only.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.tez.bucket.pruning</name>
+    <value>true</value>
+    <description>
+      When pruning is enabled, filters on bucket columns will be processed by
+      filtering the splits against a bitset of included buckets. This needs predicates
+      produced by hive.optimize.ppd and hive.optimize.index.filters.
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.optimize.dynamic.partition.hashjoin</name>
+    <value>true</value>
+    <description>
+      Whether to enable dynamically partitioned hash join optimization.
+      This setting is also dependent on enabling hive.auto.convert.join
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.zk.sm.connectionString</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper connection string for ZooKeeper SecretManager.</description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>clientPort</name>
+            <type>zoo.cfg</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.llap.io.memory.mode</name>
+    <value></value>
+    <description>
+      LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a
+      custom off-heap allocator, 'allocator' uses the custom allocator without the caches,
+      'none' doesn't use either (this mode may result in significant performance degradation)
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.metastore.event.listeners</name>
+    <value></value>
+    <description>
+      Listeners for metastore events
+    </description>
+    <filename>hive-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
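
Every entry above follows the same Ambari property layout (name, value, description, value-attributes, depends-on). A minimal sketch of pulling the plain name/value pairs out of such a file, for example to compare against a live hive-site; the local file name is an assumption:

    # Sketch: extract name/value pairs from an Ambari-style configuration XML.
    import xml.etree.ElementTree as ET

    root = ET.parse("hive-interactive-site.xml").getroot()
    properties = {}
    for prop in root.findall("property"):
        name = prop.findtext("name")
        value = prop.findtext("value") or ""
        properties[name] = value

    print(properties["hive.server2.thrift.port"])          # 10500
    print(properties["hive.server2.zookeeper.namespace"])  # hiveserver2-hive2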

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
new file mode 100755
index 0000000..3ecb24a
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,106 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hive.log.threshold=ALL
+hive.root.logger=INFO,DRFA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.log.file=hive.log
+
+# Define the root logger via the system property "hive.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# Daily Rolling File Appender
+#
+# Use the PidDailyRollingFileAppender class instead if you want to use separate log files
+# for different CLI sessions.
+#
+# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+
+log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+log4j.appender.console.encoding=UTF-8
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,DRFA
+log4j.category.Datastore=ERROR,DRFA
+log4j.category.Datastore.Schema=ERROR,DRFA
+log4j.category.JPOX.Datastore=ERROR,DRFA
+log4j.category.JPOX.Plugin=ERROR,DRFA
+log4j.category.JPOX.MetaData=ERROR,DRFA
+log4j.category.JPOX.Query=ERROR,DRFA
+log4j.category.JPOX.General=ERROR,DRFA
+log4j.category.JPOX.Enhancer=ERROR,DRFA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
+
+    </value>
+    <description>Custom log4j.properties</description>
+    <display-name>hive-log4j template</display-name>
+    <filename>hive-log4j.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

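hive-log4j.xml carries the entire log4j.properties template as a single 'content' property whose value-attributes type is 'content'; the template text is what ultimately lands on disk as a plain properties file. The sketch below shows that extraction step in a hedged form only; the input and output paths are illustrative assumptions and are not taken from the patch.

# Hedged sketch: pull the 'content' property out of a template definition
# like hive-log4j.xml and write it as a plain properties file.
import os
import xml.etree.ElementTree as ET

def write_content_property(xml_path, out_path):
    root = ET.parse(xml_path).getroot()
    for prop in root.findall('property'):
        if prop.findtext('name') == 'content':
            content = (prop.findtext('value') or '').strip() + '\n'
            with open(out_path, 'w') as fh:
                fh.write(content)
            return out_path
    raise ValueError('no content property found in %s' % xml_path)

if __name__ == '__main__':
    # Output location is an assumption for the example, not a stack default.
    write_content_property('hive-log4j.xml',
                           os.path.join('/tmp', 'hive-log4j.properties'))
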
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
new file mode 100755
index 0000000..798063b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-log4j2.xml
@@ -0,0 +1,90 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = DRFA
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = hive.log
+
+# list of all appenders
+appenders = console, DRFA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI sessions
+appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}.gz
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+  </value>
+    <description>Custom hive-log4j2.properties</description>
+    <display-name>hive-log4j2 template</display-name>
+    <filename>hive-log4j2.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

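The log4j2 template is keyed differently from the log4j one: the 'appenders' and 'loggers' entries list names (console, DRFA, NIOServerCnxn, ...), each name is then configured through appender.<name>.* or logger.<name>.* keys, and the root logger is wired to ${sys:hive.root.logger}. Below is a small consistency check over that naming convention; it assumes the template has already been rendered to a local hive-log4j2.properties file, which is an assumption made only for the example.

# Illustrative sketch: verify that every name listed under 'appenders'
# and 'loggers' also has at least one matching definition key.
def check_log4j2_names(path):
    pairs = []
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = [part.strip() for part in line.split('=', 1)]
            pairs.append((key, value))
    keys = [k for k, _ in pairs]
    missing = []
    for list_key, prefix in (('appenders', 'appender.'), ('loggers', 'logger.')):
        declared = next((v for k, v in pairs if k == list_key), '')
        for name in [n.strip() for n in declared.split(',') if n.strip()]:
            if not any(k.startswith(prefix + name + '.') for k in keys):
                missing.append(prefix + name)
    return missing

if __name__ == '__main__':
    print(check_log4j2_names('hive-log4j2.properties') or 'all names defined')
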

[46/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
deleted file mode 100755
index 6bd8df9..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
+++ /dev/null
@@ -1,835 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
-    TYPE_NAME VARCHAR2(4000) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    OWNER_TYPE VARCHAR2(10) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(128) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL, 
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
-CREATE TABLE FUNCS (
-  FUNC_ID NUMBER NOT NULL,
-  CLASS_NAME VARCHAR2(4000),
-  CREATE_TIME NUMBER(10) NOT NULL,
-  DB_ID NUMBER,
-  FUNC_NAME VARCHAR2(128),
-  FUNC_TYPE NUMBER(10) NOT NULL,
-  OWNER_NAME VARCHAR2(128),
-  OWNER_TYPE VARCHAR2(10)
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-
-CREATE TABLE FUNC_RU (
-  FUNC_ID NUMBER NOT NULL,
-  RESOURCE_TYPE NUMBER(10) NOT NULL,
-  RESOURCE_URI VARCHAR2(4000),
-  INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
-
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-
--- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
-
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
-
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-
--- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
-
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
-
--- -----------------------------------------------------------------------------------------------------------------------------------------------
--- Transaction and Lock Tables
--- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
--- -----------------------------------------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE TXNS (
-  TXN_ID NUMBER(19) PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED NUMBER(19) NOT NULL,
-  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
-  TC_DATABASE VARCHAR2(128) NOT NULL,
-  TC_TABLE VARCHAR2(128),
-  TC_PARTITION VARCHAR2(767) NULL
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID NUMBER(19),
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
-  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
-  HL_TXNID NUMBER(19),
-  HL_DB VARCHAR2(128) NOT NULL,
-  HL_TABLE VARCHAR2(128),
-  HL_PARTITION VARCHAR2(767),
-  HL_LOCK_STATE CHAR(1) NOT NULL,
-  HL_LOCK_TYPE CHAR(1) NOT NULL,
-  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  HL_ACQUIRED_AT NUMBER(19),
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID NUMBER(19) PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_WORKER_ID varchar(128),
-  CQ_START NUMBER(19),
-  CQ_RUN_AS varchar(128)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
-

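The removed Oracle schema is a linear DDL script: the DataNucleus bookkeeping tables, the metastore tables with their constraints and indexes, the transaction and lock tables, and finally the INSERT that records SCHEMA_VERSION '0.13.0'. A rough sketch of feeding such a script to a database cursor follows; get_connection() is a hypothetical stand-in for whatever DB-API 2.0 driver the environment actually provides, and nothing here replaces the schematool-based flow.

# Rough sketch: split a metastore schema script into individual
# statements and execute them through any DB-API 2.0 cursor.
def iter_statements(path):
    buf = []
    with open(path) as fh:
        for line in fh:
            stripped = line.strip()
            if not stripped or stripped.startswith('--'):
                continue  # skip blank lines and SQL comments
            buf.append(line)
            if stripped.endswith(';'):
                yield ''.join(buf).rstrip().rstrip(';')
                buf = []

def apply_schema(cursor, path):
    for statement in iter_statements(path):
        cursor.execute(statement)

# Usage (hypothetical driver and connection):
#   conn = get_connection()
#   apply_schema(conn.cursor(), 'hive-schema-0.13.0.oracle.sql')
#   conn.commit()
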
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
deleted file mode 100755
index 7b886e1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
+++ /dev/null
@@ -1,1538 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-    "SD_ID" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000),
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(128)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for FUNCS
---
-CREATE TABLE "FUNCS" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "CLASS_NAME" VARCHAR(4000),
-  "CREATE_TIME" INTEGER NOT NULL,
-  "DB_ID" BIGINT,
-  "FUNC_NAME" VARCHAR(128),
-  "FUNC_TYPE" INTEGER NOT NULL,
-  "OWNER_NAME" VARCHAR(128),
-  "OWNER_TYPE" VARCHAR(10),
-  PRIMARY KEY ("FUNC_ID")
-);
-
---
--- Table structure for FUNC_RU
---
-CREATE TABLE "FUNC_RU" (
-  "FUNC_ID" BIGINT NOT NULL,
-  "RESOURCE_TYPE" INTEGER NOT NULL,
-  "RESOURCE_URI" VARCHAR(4000),
-  "INTEGER_IDX" INTEGER NOT NULL,
-  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
-);
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
---
--- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
-
---
--- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
-
---
--- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
--- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNCS"
-    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
-
--- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
-ALTER TABLE ONLY "FUNC_RU"
-    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
-
---
--- PostgreSQL database dump complete
---
-
--- -----------------------------------------------------------------------------------------------------------------------------------------------
--- Transaction and lock tables
--- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
--- -----------------------------------------------------------------------------------------------------------------------------------------------
-
-CREATE TABLE TXNS (
-  TXN_ID bigint PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED bigint NOT NULL,
-  TXN_LAST_HEARTBEAT bigint NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
-  TC_DATABASE varchar(128) NOT NULL,
-  TC_TABLE varchar(128),
-  TC_PARTITION varchar(767) DEFAULT NULL
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID bigint,
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID bigint NOT NULL,
-  HL_LOCK_INT_ID bigint NOT NULL,
-  HL_TXNID bigint,
-  HL_DB varchar(128) NOT NULL,
-  HL_TABLE varchar(128),
-  HL_PARTITION varchar(767) DEFAULT NULL,
-  HL_LOCK_STATE char(1) NOT NULL,
-  HL_LOCK_TYPE char(1) NOT NULL,
-  HL_LAST_HEARTBEAT bigint NOT NULL,
-  HL_ACQUIRED_AT bigint,
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID bigint PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_WORKER_ID varchar(128),
-  CQ_START bigint,
-  CQ_RUN_AS varchar(128)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT bigint NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-
--- -----------------------------------------------------------------
--- Record schema version. Should be the last step in the init script
--- -----------------------------------------------------------------
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.13.0', 'Hive release version 0.13.0');
-

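The transaction and lock tables near the end of the schema above (TXNS, TXN_COMPONENTS, HIVE_LOCKS, COMPACTION_QUEUE) back Hive's ACID support, and the single-row NEXT_TXN_ID, NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID tables are seeded with 1 and serve as portable counter tables rather than native sequences. A minimal sketch of how such a counter table can be consumed, assuming a plain psql session against this schema (illustrative only; the actual Hive TxnHandler logic may differ):

  BEGIN;
  -- Bump the counter and report the allocated id in one statement; the UPDATE
  -- also serializes concurrent allocators on the single counter row.
  UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1
    RETURNING NTXN_NEXT - 1 AS allocated_txn_id;
  -- Record the new transaction using the allocated id (written literally here);
  -- 'o' is only a placeholder single-character state value.
  INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
  VALUES (1, 'o',
          (extract(epoch from now()) * 1000)::bigint,
          (extract(epoch from now()) * 1000)::bigint,
          'hive', 'localhost');
  COMMIT;

Because these transaction tables are created without quoted identifiers, PostgreSQL folds their names to lower case and they can be referenced unquoted as above, unlike the quoted metastore tables such as "TBLS" and "PARTITIONS".
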
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
deleted file mode 100755
index d08b985..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
+++ /dev/null
@@ -1,165 +0,0 @@
-SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
-
--- 15-HIVE-5700.oracle.sql
--- Normalize the date partition column values as best we can. No schema changes.
-
-CREATE FUNCTION hive13_to_date(date_str IN VARCHAR2) RETURN DATE IS dt DATE; BEGIN dt := TO_DATE(date_str, 'YYYY-MM-DD'); RETURN dt; EXCEPTION WHEN others THEN RETURN null; END;/
-
-MERGE INTO PARTITION_KEY_VALS
-USING (
-  SELECT SRC.PART_ID as IPART_ID, SRC.INTEGER_IDX as IINTEGER_IDX, 
-     NVL(TO_CHAR(hive13_to_date(PART_KEY_VAL),'YYYY-MM-DD'), PART_KEY_VAL) as NORM
-  FROM PARTITION_KEY_VALS SRC
-    INNER JOIN PARTITIONS ON SRC.PART_ID = PARTITIONS.PART_ID
-    INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
-      AND PARTITION_KEYS.INTEGER_IDX = SRC.INTEGER_IDX AND PARTITION_KEYS.PKEY_TYPE = 'date'
-) ON (IPART_ID = PARTITION_KEY_VALS.PART_ID AND IINTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX)
-WHEN MATCHED THEN UPDATE SET PART_KEY_VAL = NORM;
-
-DROP FUNCTION hive13_to_date;
-
--- 16-HIVE-6386.oracle.sql
-ALTER TABLE DBS ADD OWNER_NAME VARCHAR2(128);
-ALTER TABLE DBS ADD OWNER_TYPE VARCHAR2(10);
-
--- 17-HIVE-6458.oracle.sql
-CREATE TABLE FUNCS (
-  FUNC_ID NUMBER NOT NULL,
-  CLASS_NAME VARCHAR2(4000),
-  CREATE_TIME NUMBER(10) NOT NULL,
-  DB_ID NUMBER,
-  FUNC_NAME VARCHAR2(128),
-  FUNC_TYPE NUMBER(10) NOT NULL,
-  OWNER_NAME VARCHAR2(128),
-  OWNER_TYPE VARCHAR2(10)
-);
-
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
-ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
-CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
-CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
-
-CREATE TABLE FUNC_RU (
-  FUNC_ID NUMBER NOT NULL,
-  RESOURCE_TYPE NUMBER(10) NOT NULL,
-  RESOURCE_URI VARCHAR2(4000),
-  INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
-ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
-CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
-
--- 18-HIVE-6757.oracle.sql
-UPDATE SDS
-  SET INPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
-WHERE
-  INPUT_FORMAT= 'parquet.hive.DeprecatedParquetInputFormat' or
-  INPUT_FORMAT = 'parquet.hive.MapredParquetInputFormat'
-;
-
-UPDATE SDS
-  SET OUTPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
-WHERE
-  OUTPUT_FORMAT = 'parquet.hive.DeprecatedParquetOutputFormat'  or
-  OUTPUT_FORMAT = 'parquet.hive.MapredParquetOutputFormat'
-;
-
-UPDATE SERDES
-  SET SLIB='org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
-WHERE
-  SLIB = 'parquet.hive.serde.ParquetHiveSerDe'
-;
-
--- hive-txn-schema-0.13.0.oracle.sql
-
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements.  See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the License); you may not use this file except in compliance with
--- the License.  You may obtain a copy of the License at
---
---     http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an AS IS BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
---
--- Tables for transaction management
--- 
-
-CREATE TABLE TXNS (
-  TXN_ID NUMBER(19) PRIMARY KEY,
-  TXN_STATE char(1) NOT NULL,
-  TXN_STARTED NUMBER(19) NOT NULL,
-  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  TXN_USER varchar(128) NOT NULL,
-  TXN_HOST varchar(128) NOT NULL
-);
-
-CREATE TABLE TXN_COMPONENTS (
-  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
-  TC_DATABASE VARCHAR2(128) NOT NULL,
-  TC_TABLE VARCHAR2(128),
-  TC_PARTITION VARCHAR2(767) NULL
-);
-
-CREATE TABLE COMPLETED_TXN_COMPONENTS (
-  CTC_TXNID NUMBER(19),
-  CTC_DATABASE varchar(128) NOT NULL,
-  CTC_TABLE varchar(128),
-  CTC_PARTITION varchar(767)
-);
-
-CREATE TABLE NEXT_TXN_ID (
-  NTXN_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_TXN_ID VALUES(1);
-
-CREATE TABLE HIVE_LOCKS (
-  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
-  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
-  HL_TXNID NUMBER(19),
-  HL_DB VARCHAR2(128) NOT NULL,
-  HL_TABLE VARCHAR2(128),
-  HL_PARTITION VARCHAR2(767),
-  HL_LOCK_STATE CHAR(1) NOT NULL,
-  HL_LOCK_TYPE CHAR(1) NOT NULL,
-  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
-  HL_ACQUIRED_AT NUMBER(19),
-  HL_USER varchar(128) NOT NULL,
-  HL_HOST varchar(128) NOT NULL,
-  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
-); 
-
-CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
-
-CREATE TABLE NEXT_LOCK_ID (
-  NL_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_LOCK_ID VALUES(1);
-
-CREATE TABLE COMPACTION_QUEUE (
-  CQ_ID NUMBER(19) PRIMARY KEY,
-  CQ_DATABASE varchar(128) NOT NULL,
-  CQ_TABLE varchar(128) NOT NULL,
-  CQ_PARTITION varchar(767),
-  CQ_STATE char(1) NOT NULL,
-  CQ_TYPE char(1) NOT NULL,
-  CQ_WORKER_ID varchar(128),
-  CQ_START NUMBER(19),
-  CQ_RUN_AS varchar(128)
-);
-
-CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
-  NCQ_NEXT NUMBER(19) NOT NULL
-);
-INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
-
-
-UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
-SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;

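The 15-HIVE-5700 step in the script above rewrites date-typed partition key values into canonical YYYY-MM-DD form: hive13_to_date returns NULL for any value that fails to parse, so the NVL in the MERGE keeps the original string and only parsable values are changed. A small illustration of that expression, assuming an Oracle session (the temporary function is replaced by a bare TO_DATE here for the parsable case):

  -- Parsable value: normalized to the canonical form the MERGE writes back.
  SELECT NVL(TO_CHAR(TO_DATE('2014-1-5', 'YYYY-MM-DD'), 'YYYY-MM-DD'), '2014-1-5') AS norm
  FROM dual;
  -- Returns '2014-01-05'. For a value such as 'not-a-date', a bare TO_DATE would
  -- raise an error; in the script the hive13_to_date wrapper catches that exception
  -- and returns NULL, so the NVL falls back to the original partition value.
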
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
deleted file mode 100755
index b34f406..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
+++ /dev/null
@@ -1,38 +0,0 @@
-ALTER TABLE TXNS MODIFY (
-  TXN_ID NUMBER(19),
-  TXN_STARTED NUMBER(19),
-  TXN_LAST_HEARTBEAT NUMBER(19)
-);
-
-ALTER TABLE TXN_COMPONENTS MODIFY (
-  TC_TXNID NUMBER(19)
-);
-
-ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (
-  CTC_TXNID NUMBER(19)
-);
-
-ALTER TABLE NEXT_TXN_ID MODIFY (
-  NTXN_NEXT NUMBER(19)
-);
-
-ALTER TABLE HIVE_LOCKS MODIFY (
-  HL_LOCK_EXT_ID NUMBER(19),
-  HL_LOCK_INT_ID NUMBER(19),
-  HL_TXNID NUMBER(19),
-  HL_LAST_HEARTBEAT NUMBER(19),
-  HL_ACQUIRED_AT NUMBER(19)
-);
-
-ALTER TABLE NEXT_LOCK_ID MODIFY (
-  NL_NEXT NUMBER(19)
-);
-
-ALTER TABLE COMPACTION_QUEUE MODIFY (
-  CQ_ID NUMBER(19),
-  CQ_START NUMBER(19)
-);
-
-ALTER TABLE NEXT_COMPACTION_QUEUE_ID MODIFY (
-  NCQ_NEXT NUMBER(19)
-);

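This upgrade-0.13.0.oracle.sql script only adjusts the transaction-table id and timestamp columns to NUMBER(19), matching the column definitions in the hive-txn-schema section above (NUMBER(19) comfortably covers a signed 64-bit range). A quick check one might run from the Oracle data dictionary after applying it, assuming the metastore schema owner's session (not part of the packaged scripts):

  -- List the NUMBER columns of the altered tables with their precision;
  -- after the MODIFY statements above each id/timestamp column should show 19.
  SELECT table_name, column_name, data_type, data_precision
    FROM user_tab_columns
   WHERE table_name IN ('TXNS', 'TXN_COMPONENTS', 'COMPLETED_TXN_COMPONENTS',
                        'NEXT_TXN_ID', 'HIVE_LOCKS', 'NEXT_LOCK_ID',
                        'COMPACTION_QUEUE', 'NEXT_COMPACTION_QUEUE_ID')
     AND data_type = 'NUMBER'
   ORDER BY table_name, column_name;
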
http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
deleted file mode 100755
index 4b31f7a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/kerberos.json
+++ /dev/null
@@ -1,132 +0,0 @@
-{
-  "services": [
-    {
-      "name": "HIVE",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "hive-site": {
-            "hive.metastore.sasl.enabled": "true",
-            "hive.server2.authentication": "KERBEROS"
-          }
-        },
-        {
-          "webhcat-site": {
-            "templeton.kerberos.secret": "secret",
-            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host}"
-          }
-        },
-        {
-          "ranger-hive-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "HIVE_METASTORE",
-          "identities": [
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-site/hive.metastore.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "HIVE_SERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "hive_server_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
-                "local_username": "${hive-env/hive_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.service.keytab",
-                "owner": {
-                  "name": "${hive-env/hive_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "atlas_kafka",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
-              },
-              "keytab": {
-                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
-              },
-              "keytab": {
-                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
-              }
-            },
-            {
-              "name": "ranger_audit",
-              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "WEBHCAT_SERVER",
-          "identities": [
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "webhcat-site/templeton.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "webhcat-site/templeton.kerberos.keytab"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}


[20/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
new file mode 100755
index 0000000..b0415b1
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
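TAB_COL_STATS and PART_COL_STATS above carry the column statistics (null counts, distinct-value estimates, low/high values) keyed by table or partition. A minimal illustrative lookup, assuming a hypothetical table 'web_logs' in the 'default' database with a numeric column 'ip_count':

-- Illustrative sketch (not part of this schema file): table-level stats for one column.
SELECT NUM_NULLS, NUM_DISTINCTS, LONG_LOW_VALUE, LONG_HIGH_VALUE, LAST_ANALYZED
  FROM TAB_COL_STATS
 WHERE DB_NAME = 'default'
   AND TABLE_NAME = 'web_logs'
   AND COLUMN_NAME = 'ip_count';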
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
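MASTER_KEYS and DELEGATION_TOKENS back the metastore's delegation-token store: TOKEN_IDENT is the lookup key and TOKEN holds the serialized token. A hedged sketch of the kind of lookup the token store performs (the identifier value is a placeholder):

-- Illustrative sketch (not part of this schema file): fetch a stored delegation token.
SELECT TOKEN
  FROM DELEGATION_TOKENS
 WHERE TOKEN_IDENT = '<encoded-token-identifier>';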
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
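The VERSION row inserted above is what Hive's schema tooling reads when it validates the metastore. To confirm what this script recorded, an illustrative query run against the metastore database:

-- Illustrative sketch (not part of this schema file): confirm the recorded schema version.
SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;
-- Expected after this script: 1 | 0.12.0 | Hive release version 0.12.0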

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
new file mode 100755
index 0000000..812b897
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,718 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
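As the comment above notes, SEQUENCE_TABLE is DataNucleus's table-based ID generator: each row names a sequence and stores the next value to hand out. Roughly, an allocation follows the pattern below (a hedged sketch, not code from this patch; the sequence name and block size are examples):

-- Illustrative sketch (not part of this schema file): table-based ID allocation.
SELECT NEXT_VAL
  FROM SEQUENCE_TABLE
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable'
   FOR UPDATE;

UPDATE SEQUENCE_TABLE
   SET NEXT_VAL = NEXT_VAL + 10   -- reserve a block of ten ids
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';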
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
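Taken together, the foreign keys above wire the core lookup path DBS -> TBLS -> SDS (with PARTITIONS hanging off TBLS). A hedged illustration of how that layout is traversed; the database and table names are hypothetical:

-- Illustrative sketch (not part of this schema file): storage descriptor for one table.
SELECT d."NAME" AS db_name, t.TBL_NAME, s.LOCATION, s.INPUT_FORMAT, s.OUTPUT_FORMAT
  FROM DBS d
  JOIN TBLS t ON t.DB_ID = d.DB_ID
  JOIN SDS  s ON s.SD_ID = t.SD_ID
 WHERE d."NAME" = 'default'
   AND t.TBL_NAME = 'web_logs';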


[03/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
new file mode 100755
index 0000000..b20114c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,424 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/tmp/dummy</value>
+    <description>This is a temporary workaround for ODPI-186</description>
+  </property>
+
+  <!-- ResourceManager -->
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description> The address of ResourceManager. </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+    <display-name>Minimum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>5120</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+    <display-name>Maximum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled.</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value/>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- NodeManager -->
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+    <display-name>Memory allocated for all YARN containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+    <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio of virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+    <display-name>Virtual Memory Ratio</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.1</minimum>
+      <maximum>5.0</maximum>
+      <increment-step>0.1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>cluster-env</type>
+        <name>user_group</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot
+      start with a number</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_{$contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!--
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check_nodemanager</value>
+    <description>The health check script to run.</description>
+  </property>
+   -->
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script timeout period.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+    <display-name>Enable Log Aggregation</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: if you set this too small, you will spam the name node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the NodeManager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
+      if fewer healthy local-dirs (or log-dirs) are available,
+      then new containers will not be launched on this node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.https.address</name>
+    <value>localhost:8090</value>
+    <description>
+      The https address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for YARN daemons. The following values are supported:
+      - HTTP_ONLY: service is provided only on http
+      - HTTPS_ONLY: service is provided only on https
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
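
To make the layout described above concrete: aggregated logs end up under
{yarn.nodemanager.remote-app-log-dir}/{user}/{remote-app-log-dir-suffix}, with one
directory per application underneath. A minimal sketch of that path composition;
the property values and the application id below are invented for illustration,
not taken from this patch:

# Sketch: compose the aggregated-log directory for one application,
# following the pattern described by the properties above.
def aggregated_log_dir(remote_app_log_dir, user, suffix, app_id):
    return "/".join([remote_app_log_dir.rstrip("/"), user, suffix, app_id])

print(aggregated_log_dir("/app-logs", "ambari-qa", "logs",
                         "application_1490000000000_0001"))
# -> /app-logs/ambari-qa/logs/application_1490000000000_0001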

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
new file mode 100755
index 0000000..4093431
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/kerberos.json
@@ -0,0 +1,214 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
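
The ${...} references in the descriptor above (for example ${yarn-env/yarn_user}
and ${keytab_dir}) are placeholders that get substituted from cluster
configuration before the identities are applied. A rough sketch of that
substitution against a flat lookup table; this is not Ambari's actual resolver,
and the lookup values are assumptions:

import re

# Sketch: replace ${config-type/property} style placeholders with values from
# a lookup table. Purely illustrative; the real variable replacement is richer.
def resolve(template, lookup):
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: lookup.get(m.group(1), m.group(0)),
                  template)

lookup = {"yarn-env/yarn_user": "yarn",
          "keytab_dir": "/etc/security/keytabs",
          "realm": "EXAMPLE.COM"}
print(resolve("${keytab_dir}/nm.service.keytab", lookup))
print(resolve("nm/_HOST@${realm}", lookup))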

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
index 6458e29..18117f3 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml
@@ -21,13 +21,283 @@
   <services>
     <service>
       <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
       <version>2.7.1+odpi</version>
-      <extends>common-services/YARN/2.1.0.2.0</extends>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <displayName>ResourceManager</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>yarn_resourcemanager</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>yarn_historyserver</logId>
+            </log>
+            <log>
+              <logId>yarn_jobsummary</logId>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REFRESHQUEUES</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+            <config-type>hdfs-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <decommissionAllowed>true</decommissionAllowed>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+           <bulkCommands>
+             <displayName>NodeManagers</displayName>
+             <!-- Used by decommission and recommission -->
+             <masterComponent>RESOURCEMANAGER</masterComponent>
+           </bulkCommands>
+          <logs>
+            <log>
+              <logId>yarn_nodemanager</logId>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <displayName>YARN Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>yarn-env.sh</fileName>
+              <dictionaryName>yarn-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>capacity-scheduler.xml</fileName>
+              <dictionaryName>capacity-scheduler</dictionaryName>
+            </configFile>                        
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+        <service>MAPREDUCE2</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>yarn-log4j</config-type>
+        <config-type>ams-ssl-client</config-type>
+        <config-type>ranger-yarn-plugin-properties</config-type>
+        <config-type>ranger-yarn-audit</config-type>
+        <config-type>ranger-yarn-policymgr-ssl</config-type>
+        <config-type>ranger-yarn-security</config-type>
+      </configuration-dependencies>
+      <widgetsFileName>YARN_widgets.json</widgetsFileName>
+      <metricsFileName>YARN_metrics.json</metricsFileName>
     </service>
+
     <service>
       <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
       <version>2.7.1+odpi</version>
-      <extends>common-services/MAPREDUCE2/2.1.0.2.0.6.0</extends>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>mapred_historyserver</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>mapred-site.xml</fileName>
+              <dictionaryName>mapred-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>mapred-env.sh</fileName>
+              <dictionaryName>mapred-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <configuration-dependencies>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+        <config-type>ssl-client</config-type>
+        <config-type>ssl-server</config-type>
+        <config-type>ams-ssl-client</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
+      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
     </service>
   </services>
 </metainfo>
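
Ambari reads this metainfo.xml to learn which components the service declares
and which command script drives each of them. A short sketch that extracts that
mapping with the standard library XML parser; the example call is left commented
out since it needs the file on disk:

import xml.etree.ElementTree as ET

# List each declared component together with the script that manages it.
def list_components(metainfo_path):
    root = ET.parse(metainfo_path).getroot()
    for component in root.iter("component"):
        name = component.findtext("name")
        script = component.findtext("commandScript/script")
        print("%s -> %s" % (name, script))

# list_components("bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/metainfo.xml")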

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
new file mode 100755
index 0000000..c26dcc7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/.hash
@@ -0,0 +1 @@
+51572fff0a03b67b13f41bbe7c55c4c2b682d089
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
new file mode 100755
index 0000000..d7159e4
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanager_health.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import socket
+import urllib2
+import logging
+import traceback
+from ambari_commons import OSCheck
+from ambari_commons.inet_utils import resolve_address
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+OK_MESSAGE = 'NodeManager Healthy'
+CRITICAL_CONNECTION_MESSAGE = 'Connection failed to {0} ({1})'
+CRITICAL_HTTP_STATUS_MESSAGE = 'HTTP {0} returned from {1} ({2}) \n{3}'
+CRITICAL_NODEMANAGER_STATUS_MESSAGE = 'NodeManager returned an unexpected status of "{0}"'
+CRITICAL_NODEMANAGER_UNKNOWN_JSON_MESSAGE = 'Unable to determine NodeManager health from unexpected JSON response'
+
+KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
+KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+NODEMANAGER_DEFAULT_PORT = 8042
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health on {0} fails:"
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS,
+  YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY)
+  
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  result_code = RESULT_CODE_UNKNOWN
+
+  if configurations is None:
+    return (result_code, ['There were no configurations supplied to the script.'])
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  scheme = 'http'
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
+    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
+    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[YARN_HTTP_POLICY_KEY]
+
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+
+  # determine the right URI and whether to use SSL
+  host_port = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      host_port = https_uri
+
+  label = ''
+  url_response = None
+  node_healthy = 'false'
+  total_time = 0
+
+  # replace the hostname with the host fqdn so it works in all environments
+  if host_port is not None:
+    if ":" in host_port:
+      uri_host, uri_port = host_port.split(':')
+      host_port = '{0}:{1}'.format(host_name, uri_port)
+    else:
+      host_port = host_name
+
+  # some yarn-site structures don't have the web ui address
+  if host_port is None:
+    host_port = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT)
+
+  query = "{0}://{1}/ws/v1/node/info".format(scheme, host_port)
+
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      # curl requires an integer timeout
+      curl_connection_timeout = int(connection_timeout)
+
+      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+        query, "nm_health_alert", executable_paths, False, "NodeManager Health", smokeuser,
+        connection_timeout=curl_connection_timeout, kinit_timer_ms = kinit_timer_ms)
+
+      json_response = json.loads(url_response)
+    else:
+      # execute the query for the JSON that includes the NodeManager status
+      url_response = urllib2.urlopen(query, timeout=connection_timeout)
+      json_response = json.loads(url_response.read())
+  except urllib2.HTTPError, httpError:
+    label = CRITICAL_HTTP_STATUS_MESSAGE.format(str(httpError.code), query,
+      str(httpError), traceback.format_exc())
+
+    return (RESULT_CODE_CRITICAL, [label])
+  except:
+    label = CRITICAL_CONNECTION_MESSAGE.format(query, traceback.format_exc())
+    return (RESULT_CODE_CRITICAL, [label])
+
+  # URL response received, parse it
+  try:
+    node_healthy = json_response['nodeInfo']['nodeHealthy']
+    node_healthy_report = json_response['nodeInfo']['healthReport']
+
+    # convert boolean to string
+    node_healthy = str(node_healthy)
+  except:
+    return (RESULT_CODE_CRITICAL, [query + "\n" + traceback.format_exc()])
+  finally:
+    if url_response is not None:
+      try:
+        url_response.close()
+      except:
+        pass
+
+  # proper JSON received, compare against known value
+  if node_healthy.lower() == 'true':
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE
+  elif node_healthy.lower() == 'false':
+    result_code = RESULT_CODE_CRITICAL
+    label = node_healthy_report
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_NODEMANAGER_STATUS_MESSAGE.format(node_healthy)
+
+  return (result_code, [label])
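
get_tokens() above declares the {{site/property}} tokens the alert framework
resolves, and execute() receives them as plain dictionary keys. A hedged example
of exercising the alert by hand with a minimal, unsecured configuration; the
host, port and smoke user are invented, and the call is left commented out
because it performs a live HTTP request against the NodeManager web UI:

configurations = {
    '{{yarn-site/yarn.nodemanager.webapp.address}}': 'nm-host.example.com:8042',
    '{{yarn-site/yarn.http.policy}}': 'HTTP_ONLY',
    '{{cluster-env/security_enabled}}': 'false',
    '{{cluster-env/smokeuser}}': 'ambari-qa',
}
# result_code, labels = execute(configurations=configurations,
#                               host_name='nm-host.example.com')
# print(result_code, labels[0])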

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
new file mode 100755
index 0000000..adf27ec
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import logging
+import traceback
+
+from ambari_commons.urllib_handlers import RefreshHeaderProcessor
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.core.environment import Environment
+
+ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
+OK_LABEL = 'All NodeManagers are healthy'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+KERBEROS_KEYTAB = '{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}'
+KERBEROS_PRINCIPAL = '{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+LOGGER_EXCEPTION_MESSAGE = "[Alert] NodeManager Health Summary on {0} fails:"
+logger = logging.getLogger('ambari_alerts')
+
+QRY = "Hadoop:service=ResourceManager,name=RMNMInfo"
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, EXECUTABLE_SEARCH_PATHS, \
+    YARN_HTTP_POLICY_KEY, SMOKEUSER_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  scheme = 'http'  
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  executable_paths = None
+  if EXECUTABLE_SEARCH_PATHS in configurations:
+    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
+
+  kerberos_keytab = None
+  if KERBEROS_KEYTAB in configurations:
+    kerberos_keytab = configurations[KERBEROS_KEYTAB]
+
+  kerberos_principal = None
+  if KERBEROS_PRINCIPAL in configurations:
+    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
+    kerberos_principal = kerberos_principal.replace('_HOST', host_name)
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
+    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
+    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in configurations:
+    http_policy = configurations[YARN_HTTP_POLICY_KEY]
+    
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      uri = https_uri
+
+  uri = str(host_name) + ":" + uri.split(":")[1]
+  live_nodemanagers_qry = "{0}://{1}/jmx?qry={2}".format(scheme, uri, QRY)
+  convert_to_json_failed = False
+  response_code = None
+  try:
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      env = Environment.get_instance()
+
+      # curl requires an integer timeout
+      curl_connection_timeout = int(connection_timeout)
+
+      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+        live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
+        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      try:
+        url_response_json = json.loads(url_response)
+        live_nodemanagers = json.loads(find_value_in_jmx(url_response_json, "LiveNodeManagers", live_nodemanagers_qry))
+      except ValueError, error:
+        convert_to_json_failed = True
+        logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
+        format("NodeManager Health Summary", str(error)))
+
+      if convert_to_json_failed:
+        response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
+          live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
+          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+          kinit_timer_ms = kinit_timer_ms)
+    else:
+      live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry,
+      "LiveNodeManagers", connection_timeout))
+
+    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
+      if response_code in [200, 307] and convert_to_json_failed:
+        return ('UNKNOWN', ['HTTP {0} response (metrics unavailable)'.format(str(response_code))])
+      elif convert_to_json_failed and response_code not in [200, 307]:
+        raise Exception("[Alert][NodeManager Health Summary] Getting data from {0} failed with http code {1}".format(
+          str(live_nodemanagers_qry), str(response_code)))
+
+    unhealthy_count = 0
+
+    for nodemanager in live_nodemanagers:
+      health_report = nodemanager['State']
+      if health_report == 'UNHEALTHY':
+        unhealthy_count += 1
+
+    if unhealthy_count == 0:
+      result_code = 'OK'
+      label = OK_LABEL
+    else:
+      result_code = 'CRITICAL'
+      if unhealthy_count == 1:
+        label = ERROR_LABEL.format(unhealthy_count, '', 'is')
+      else:
+        label = ERROR_LABEL.format(unhealthy_count, 's', 'are')
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+def get_value_from_jmx(query, jmx_property, connection_timeout):
+  response = None
+  
+  try:
+    # use a custom header processor that will look for the non-standard
+    # "Refresh" header and attempt to follow the redirect
+    url_opener = urllib2.build_opener(RefreshHeaderProcessor())
+    response = url_opener.open(query, timeout=connection_timeout)
+
+    data = response.read()
+    data_dict = json.loads(data)
+    return find_value_in_jmx(data_dict, jmx_property, query)
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass
+
+
+def find_value_in_jmx(data_dict, jmx_property, query):
+  json_data = data_dict["beans"][0]
+
+  if jmx_property not in json_data:
+    beans = data_dict['beans']
+    for jmx_prop_list_item in beans:
+      if "name" in jmx_prop_list_item and jmx_prop_list_item["name"] == QRY:
+        if jmx_property not in jmx_prop_list_item:
+          raise Exception("Unable to find {0} in JSON from {1} ".format(jmx_property, query))
+        json_data = jmx_prop_list_item
+
+  return json_data[jmx_property]
\ No newline at end of file
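
The summary alert above pulls the LiveNodeManagers attribute out of the
ResourceManager's /jmx response and counts entries in the UNHEALTHY state. The
same extraction against a canned payload, for illustration only (the JSON below
is fabricated, not captured output):

import json

# /jmx returns {"beans": [...]}; the alert looks for the bean named
# Hadoop:service=ResourceManager,name=RMNMInfo and parses its LiveNodeManagers
# attribute, which is itself a JSON-encoded string.
sample = {
    "beans": [{
        "name": "Hadoop:service=ResourceManager,name=RMNMInfo",
        "LiveNodeManagers": json.dumps([
            {"HostName": "nm1.example.com", "State": "RUNNING"},
            {"HostName": "nm2.example.com", "State": "UNHEALTHY"},
        ]),
    }]
}
live = json.loads(sample["beans"][0]["LiveNodeManagers"])
unhealthy = [nm for nm in live if nm["State"] == "UNHEALTHY"]
print("%d NodeManager(s) unhealthy" % len(unhealthy))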

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
new file mode 100755
index 0000000..5e2b4d9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import urllib2
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+#Return response for given path and address
+def getResponse(path, address, ssl_enabled):
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+
+  try:
+    handle = urllib2.urlopen(url)
+    output = handle.read()
+    handle.close()
+    response = json.loads(output)
+    if response == None:
+      print 'There is no response for url: ' + str(url)
+      exit(1)
+    return response
+  except Exception as e:
+    print 'Error getting response for url:' + str(url), e
+    exit(1)
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAvailabilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking availability status of component', e
+    exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'Resourcemanager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAbilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking ability of component', e
+    exit(1)
+
+#Validate component-specific response that it has required resources to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no nodemanagers connected to the resourcemanager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active nodemanagers connected to the resourcemanager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  component = args[0]
+
+  address = options.address
+  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()
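
The Windows status checker above is driven per component from the command line;
an illustrative invocation (host and port are made up) is

    python validateYarnComponentStatusWindows.py -p rm-host.example.com:8088 -s false rm

which verifies that the ResourceManager REST API reports the STARTED state and,
via /ws/v1/cluster/nodes, that at least one NodeManager is connected.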

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
new file mode 100755
index 0000000..35de4bb
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
new file mode 100755
index 0000000..4ec6aa7
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/application_timeline_server.py
@@ -0,0 +1,155 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
+  FILE_TYPE_XML
+from resource_management.libraries.functions.format import format
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class ApplicationTimelineServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('timelineserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('timelineserver', action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name='apptimelineserver')
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class ApplicationTimelineServerWindows(ApplicationTimelineServer):
+  def status(self, env):
+    service('timelineserver', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class ApplicationTimelineServerDefault(ApplicationTimelineServer):
+  def get_component_name(self):
+    return "hadoop-yarn-timelineserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-timelineserver", params.version)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    Execute(format("mv {yarn_historyserver_pid_file_old} {yarn_historyserver_pid_file}"),
+            only_if = format("test -e {yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
+    functions.check_process_status(status_params.yarn_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.enabled": "true",
+                           "yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.timeline-service.principal",
+                           "yarn.timeline-service.keytab",
+                           "yarn.timeline-service.http-authentication.kerberos.principal",
+                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
+
+      props_read_check = ["yarn.timeline-service.keytab",
+                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
+               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
+            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
+            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.yarn_log_dir
+  
+  def get_user(self):
+    import params
+    return params.yarn_user
+
+if __name__ == "__main__":
+  ApplicationTimelineServer().execute()
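
security_status() above builds two kinds of expectations against yarn-site:
properties that must have exact values and properties that must simply be
non-empty (plus readable keytabs, which are skipped here). A toy version of that
validation against an in-memory dict, only meant to show the shape of the check;
the sample values are invented and this is not the resource_management
implementation:

# Sketch: exact-value expectations must match; required properties must be set.
def check_yarn_site(yarn_site, props_value_check, props_empty_check):
    issues = []
    for key, expected in props_value_check.items():
        if yarn_site.get(key) != expected:
            issues.append("%s should be %s" % (key, expected))
    for key in props_empty_check:
        if not yarn_site.get(key):
            issues.append("%s must be set" % key)
    return issues

yarn_site = {"yarn.timeline-service.enabled": "true",
             "yarn.timeline-service.http-authentication.type": "kerberos",
             "yarn.acl.enable": "true",
             "yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM"}
print(check_yarn_site(
    yarn_site,
    {"yarn.timeline-service.enabled": "true",
     "yarn.timeline-service.http-authentication.type": "kerberos",
     "yarn.acl.enable": "true"},
    ["yarn.timeline-service.principal", "yarn.timeline-service.keytab"]))
# -> ['yarn.timeline-service.keytab must be set']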

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
new file mode 100755
index 0000000..34c683a
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,190 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+
+from install_jars import install_tez_jars
+from yarn import yarn
+from service import service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HistoryServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop', serviceName='mapreduce')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="historyserver")
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HistoryserverWindows(HistoryServer):
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def status(self, env):
+    service('historyserver', action='status')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HistoryServerDefault(HistoryServer):
+  def get_component_name(self):
+    return "hadoop-mapreduce-historyserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
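+    # If a target stack version is set and it supports rolling upgrade, repoint
+    # the hadoop configuration and the historyserver binaries at that version,
+    # then re-upload the mapreduce, tez and slider tarballs to HDFS.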
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      # MC Hammer said, "Can't touch this"
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      params.HdfsResource(None, action="execute")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
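+    # On stacks that support it, push the mapreduce, tez and slider tarballs to
+    # HDFS before starting, and only trigger the queued HdfsResource actions if
+    # at least one tarball actually needed copying.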
+    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+      # MC Hammer said, "Can't touch this"
+      resource_created = copy_to_hdfs(
+        "mapreduce",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped)
+      resource_created = copy_to_hdfs(
+        "tez",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped) or resource_created
+      resource_created = copy_to_hdfs(
+        "slider",
+        params.user_group,
+        params.hdfs_user,
+        host_sys_prepped=params.host_sys_prepped) or resource_created
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+    else:
+      # In stack versions before copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
+      install_tez_jars()
+
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.mapred_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      expectations = {}
+      expectations.update(build_expectations('mapred-site',
+                                             None,
+                                             [
+                                               'mapreduce.jobhistory.keytab',
+                                               'mapreduce.jobhistory.principal',
+                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
+                                               'mapreduce.jobhistory.webapp.spnego-principal'
+                                             ],
+                                             None))
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'mapred-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'mapred-site' not in security_params or
+               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal not set."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
+                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.mapred_log_dir
+
+  def get_user(self):
+    import params
+    return params.mapred_user
+
+if __name__ == "__main__":
+  HistoryServer().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
new file mode 100755
index 0000000..44015bf
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/install_jars.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+import glob
+
+def install_tez_jars():
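+  """Copy the local Tez API and lib jars into the HDFS directories referenced by tez.lib.uris."""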
+  import params
+
+  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
+
+  # If tez libraries are to be stored in hdfs
+  if destination_hdfs_dirs:
+    for hdfs_dir in destination_hdfs_dirs:
+      params.HdfsResource(hdfs_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.tez_user,
+                           mode=0755
+      )
+
+    app_dir_path = None
+    lib_dir_path = None
+
+    # destination_hdfs_dirs is known to be non-empty here; sort the paths into
+    # the Tez lib directory and the application directory.
+    for path in destination_hdfs_dirs:
+      if 'lib' in path:
+        lib_dir_path = path
+      else:
+        app_dir_path = path
+
+    tez_jars = {}
+    if app_dir_path:
+      tez_jars[params.tez_local_api_jars] = app_dir_path
+    if lib_dir_path:
+      tez_jars[params.tez_local_lib_jars] = lib_dir_path
+
+    for src_file_regex, dest_dir in tez_jars.iteritems():
+      for src_filepath in glob.glob(src_file_regex):
+        src_filename = os.path.basename(src_filepath)
+        params.HdfsResource(format("{dest_dir}/{src_filename}"),
+                            type="file",
+                            action="create_on_execute",
+                            source=src_filepath,
+                            mode=0755,
+                            owner=params.tez_user
+         )
+
+    params.HdfsResource(None, action="execute")
+
+
+def get_tez_hdfs_dir_paths(tez_lib_uris = None):
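+  """Return the HDFS directory paths referenced by a comma-separated tez.lib.uris value, with the hdfs:// prefix stripped."""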
+  hdfs_path_prefix = 'hdfs://'
+  lib_dir_paths = []
+  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
+    dir_paths = tez_lib_uris.split(',')
+    for path in dir_paths:
+      if not "tez.tar.gz" in path:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
+        lib_dir_paths.append(lib_dir_path)
+      else:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_paths.append(os.path.dirname(lib_dir_path))
+
+  return lib_dir_paths

http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
new file mode 100755
index 0000000..5fc498d
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,168 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+import sys
+from resource_management import *
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class MapReduce2ServiceCheckWindows(MapReduce2ServiceCheck):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    component_type = 'hs'
+    # Both the SSL and non-SSL branches resolve to the same history server web
+    # UI address, so a single assignment is sufficient.
+    component_address = params.hs_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatusWindows.py"
+    validateStatusFilePath = os.path.join(os.path.dirname(params.hadoop_home), "temp", validateStatusFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "{0} {1} {2} -p {3} -s {4}".format(
+      python_executable, validateStatusFilePath, component_type, component_address, params.hadoop_ssl_enabled)
+
+    if params.security_enabled:
+      kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.smoke_user_keytab, params.smokeuser)
+      smoke_cmd = kinit_cmd + validateStatusCmd
+    else:
+      smoke_cmd = validateStatusCmd
+
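+    # Drop the status-validation helper script into a temp directory alongside
+    # hadoop_home and run it, prefixing the command with a kinit when security
+    # is enabled.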
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName)
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    # hadoop_exe = os.path.join(params.hadoop_home, "bin", "hadoop")
+    #
+    # tested_file = os.path.join(params.hadoop_home, "bin", "hadoop.cmd")
+    # jar_path = os.path.join(params.hadoop_mapred2_jar_location, params.hadoopMapredExamplesJarName)
+    # input_file = format("/user/hadoop/mapredsmokeinput")
+    # output_file = format("/user/hadoop/mapredsmokeoutput")
+    # cleanup_cmd = format("cmd /C {hadoop_exe} fs -rm -r -f {output_file} {input_file}")
+    # create_file_cmd = format("cmd /C {hadoop_exe} fs -put {tested_file} {input_file}")
+    # run_wordcount_job = format("cmd /C {hadoop_exe} jar {jar_path} wordcount {input_file} {output_file}")
+    # test_cmd = format("cmd /C {hadoop_exe} fs -test -e {output_file}")
+    #
+    # if params.security_enabled:
+    #   kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, smoke_user_keytab, smokeuser)
+    #   Execute(kinit_cmd)
+    #
+    # Execute(cleanup_cmd,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(create_file_cmd,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(run_wordcount_job,
+    #         tries=1,
+    #         try_sleep=5,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+    #
+    # Execute(test_cmd,
+    #         logoutput=True,
+    #         user=params.hdfs_user
+    # )
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class MapReduce2ServiceCheckDefault(MapReduce2ServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
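+    # Stage the smoke test in HDFS: make sure the smoke user's home directory
+    # exists, drop any previous output directory, and upload /etc/passwd as the
+    # wordcount input file.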
+    params.HdfsResource(format("/user/{smokeuser}"),
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smokeuser,
+                      mode=params.smoke_hdfs_user_mode,
+    )
+    params.HdfsResource(output_file,
+                        action = "delete_on_execute",
+                        type = "directory",
+                        dfs_type = params.dfs_type,
+    )
+    params.HdfsResource(input_file,
+                        action = "create_on_execute",
+                        type = "file",
+                        source = "/etc/passwd",
+                        dfs_type = params.dfs_type,
+    )
+    params.HdfsResource(None, action="execute")
+
+    # initialize the ticket
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      Execute(kinit_cmd, user=params.smokeuser)
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True)
+
+    # the ticket may have expired, so re-initialize
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      Execute(kinit_cmd, user=params.smokeuser)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir)
+
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()


[19/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
new file mode 100755
index 0000000..bc6486b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1406 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
+

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
new file mode 100755
index 0000000..89ce15d
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
@@ -0,0 +1,889 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31


[09/52] bigtop git commit: ODPI-5. Integrate Ambari packaging into Bigtop

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
new file mode 100755
index 0000000..568e46e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/stack_advisor.py
@@ -0,0 +1,1947 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import re
+import os
+import sys
+import socket
+
+from math import ceil, floor
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs
+
+from stack_advisor import DefaultStackAdvisor
+
+
+class ODPi10StackAdvisor(DefaultStackAdvisor):
+
+  def __init__(self):
+    super(ODPi10StackAdvisor, self).__init__()
+    Logger.initialize_logger()
+
+  def getComponentLayoutValidations(self, services, hosts):
+    """Returns array of Validation objects about issues with hostnames components assigned to"""
+    items = super(ODPi10StackAdvisor, self).getComponentLayoutValidations(services, hosts)
+
+    # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
+    # Use a set for fast lookup
+    hostsSet =  set(super(ODPi10StackAdvisor, self).getActiveHosts([host["Hosts"] for host in hosts["items"]]))  #[host["Hosts"]["host_name"] for host in hosts["items"]]
+    hostsCount = len(hostsSet)
+
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
+    secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
+
+    # Validating cardinality
+    for component in componentsList:
+      if component["StackServiceComponents"]["cardinality"] is not None:
+         componentName = component["StackServiceComponents"]["component_name"]
+         componentDisplayName = component["StackServiceComponents"]["display_name"]
+         componentHosts = []
+         if component["StackServiceComponents"]["hostnames"] is not None:
+           componentHosts = [componentHost for componentHost in component["StackServiceComponents"]["hostnames"] if componentHost in hostsSet]
+         componentHostsCount = len(componentHosts)
+         cardinality = str(component["StackServiceComponents"]["cardinality"])
+         # cardinality types: null, 1+, 1-2, 1, ALL
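+         # Illustrative example with assumed values: a cardinality of "1-2" with
+         # componentHostsCount == 3 takes the "Between 1 and 2 ..." branch below
+         # and adds an ERROR validation item for that component.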
+         message = None
+         if "+" in cardinality:
+           hostsMin = int(cardinality[:-1])
+           if componentHostsCount < hostsMin:
+             message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
+         elif "-" in cardinality:
+           nums = cardinality.split("-")
+           hostsMin = int(nums[0])
+           hostsMax = int(nums[1])
+           if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
+             message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
+         elif "ALL" == cardinality:
+           if componentHostsCount != hostsCount:
+             message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
+         else:
+           if componentHostsCount != int(cardinality):
+             message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
+
+         if message is not None:
+           items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
+
+    # Validating host-usage
+    usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
+    usedHostsList = [item for sublist in usedHostsListList for item in sublist]
+    nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]
+    for host in nonUsedHostsList:
+      items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
+
+    return items
+
+  def getServiceConfigurationRecommenderDict(self):
+    return {
+      "YARN": self.recommendYARNConfigurations,
+      "MAPREDUCE2": self.recommendMapReduce2Configurations,
+      "HDFS": self.recommendHDFSConfigurations,
+      "HBASE": self.recommendHbaseConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "AMBARI_METRICS": self.recommendAmsConfigurations,
+      "RANGER": self.recommendRangerConfigurations
+    }
+
+  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
+    putYarnProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
+    nodemanagerMinRam = 1048576 # 1TB in mb
+    if "referenceNodeManagerHost" in clusterData:
+      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
+    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
+    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
+    putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
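+    # Illustration with assumed cluster data (containers=8, ramPerContainer=2048 MB):
+    # resource.memory-mb and maximum-allocation-mb are recommended as 16384,
+    # minimum-allocation-mb as 2048.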
+    putYarnEnvProperty('min_user_id', self.get_system_min_uid())
+
+    sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
+    if sc_queue_name is not None:
+      putYarnEnvProperty("service_check.queue.name", sc_queue_name)
+
+    containerExecutorGroup = 'hadoop'
+    if 'cluster-env' in services['configurations'] and 'user_group' in services['configurations']['cluster-env']['properties']:
+      containerExecutorGroup = services['configurations']['cluster-env']['properties']['user_group']
+    putYarnProperty("yarn.nodemanager.linux-container-executor.group", containerExecutorGroup)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "TEZ" in servicesList:
+        ambari_user = self.getAmbariUser(services)
+        ambariHostName = socket.getfqdn()
+        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
+        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(ambari_user), "*")
+        old_ambari_user = self.getOldAmbariUser(services)
+        if old_ambari_user is not None:
+            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
+            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
+
+
+  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
+    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
+    putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
+    putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
+    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
+    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
+    putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
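+    # Illustration with an assumed mapMemory of 2048 MB: mapreduce.map.java.opts is
+    # recommended as "-Xmx1638m" (80% of the container) and mapreduce.task.io.sort.mb as 819.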
+    mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
+    if mr_queue is not None:
+      putMapredProperty("mapreduce.job.queuename", mr_queue)
+
+  def getAmbariUser(self, services):
+    ambari_user = services['ambari-server-properties']['ambari-server.user']
+    if "cluster-env" in services["configurations"] \
+          and "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"] \
+                and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+                    and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+      ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
+      ambari_user = ambari_user.split('@')[0]
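+      # e.g. a hypothetical principal "ambari-server@EXAMPLE.COM" reduces to "ambari-server"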
+    return ambari_user
+
+  def getOldAmbariUser(self, services):
+    ambari_user = None
+    if "cluster-env" in services["configurations"]:
+      if "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+              and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+         ambari_user = services['ambari-server-properties']['ambari-server.user']
+      elif "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"]:
+         ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
+         ambari_user = ambari_user.split('@')[0]
+    return ambari_user
+
+  def recommendAmbariProxyUsersForHDFS(self, services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute):
+      if "HDFS" in servicesList:
+          ambari_user = self.getAmbariUser(services)
+          ambariHostName = socket.getfqdn()
+          putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
+          putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(ambari_user), "*")
+          old_ambari_user = self.getOldAmbariUser(services)
+          if old_ambari_user is not None:
+            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
+            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
+
+  def recommendHadoopProxyUsers (self, configurations, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    users = {}
+
+    if 'forced-configurations' not in services:
+      services["forced-configurations"] = []
+
+    if "HDFS" in servicesList:
+      hdfs_user = None
+      if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
+        hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
+        if not hdfs_user in users and hdfs_user is not None:
+          users[hdfs_user] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "hadoop-env", "propertyName" : "hdfs_user"}
+
+    if "OOZIE" in servicesList:
+      oozie_user = None
+      if "oozie-env" in services["configurations"] and "oozie_user" in services["configurations"]["oozie-env"]["properties"]:
+        oozie_user = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
+        oozieServerHosts = self.getHostsWithComponent("OOZIE", "OOZIE_SERVER", services, hosts)
+        if oozieServerHosts is not None:
+          oozieServerHostsNameList = []
+          for oozieServerHost in oozieServerHosts:
+            oozieServerHostsNameList.append(oozieServerHost["Hosts"]["host_name"])
+          oozieServerHostsNames = ",".join(oozieServerHostsNameList)
+          if not oozie_user in users and oozie_user is not None:
+            users[oozie_user] = {"propertyHosts" : oozieServerHostsNames,"propertyGroups" : "*", "config" : "oozie-env", "propertyName" : "oozie_user"}
+
+    hive_user = None
+    if "HIVE" in servicesList:
+      webhcat_user = None
+      if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"] \
+              and "webhcat_user" in services["configurations"]["hive-env"]["properties"]:
+        hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
+        webhcat_user = services["configurations"]["hive-env"]["properties"]["webhcat_user"]
+        hiveServerHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER", services, hosts)
+        hiveServerInteractiveHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER_INTERACTIVE", services, hosts)
+        webHcatServerHosts = self.getHostsWithComponent("HIVE", "WEBHCAT_SERVER", services, hosts)
+
+        if hiveServerHosts is not None:
+          hiveServerHostsNameList = []
+          for hiveServerHost in hiveServerHosts:
+            hiveServerHostsNameList.append(hiveServerHost["Hosts"]["host_name"])
+          # Append the Hive Server Interactive hosts as well, since they are also HiveServer2 (Hive2) components.
+          if hiveServerInteractiveHosts:
+            for hiveServerInteractiveHost in hiveServerInteractiveHosts:
+              hiveServerInteractiveHostName = hiveServerInteractiveHost["Hosts"]["host_name"]
+              if hiveServerInteractiveHostName not in hiveServerHostsNameList:
+                hiveServerHostsNameList.append(hiveServerInteractiveHostName)
+                Logger.info("Appended (if not exiting), Hive Server Interactive Host : '{0}', to Hive Server Host List : '{1}'".format(hiveServerInteractiveHostName, hiveServerHostsNameList))
+
+          hiveServerHostsNames = ",".join(hiveServerHostsNameList)  # includes Hive Server interactive host also.
+          Logger.info("Hive Server and Hive Server Interactive (if enabled) Host List : {0}".format(hiveServerHostsNameList))
+          if not hive_user in users and hive_user is not None:
+            users[hive_user] = {"propertyHosts" : hiveServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "hive_user"}
+
+        if webHcatServerHosts is not None:
+          webHcatServerHostsNameList = []
+          for webHcatServerHost in webHcatServerHosts:
+            webHcatServerHostsNameList.append(webHcatServerHost["Hosts"]["host_name"])
+          webHcatServerHostsNames = ",".join(webHcatServerHostsNameList)
+          if not webhcat_user in users and webhcat_user is not None:
+            users[webhcat_user] = {"propertyHosts" : webHcatServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "webhcat_user"}
+
+    if "YARN" in servicesList:
+      yarn_user = None
+      if "yarn-env" in services["configurations"] and "yarn_user" in services["configurations"]["yarn-env"]["properties"]:
+        yarn_user = services["configurations"]["yarn-env"]["properties"]["yarn_user"]
+        rmHosts = self.getHostsWithComponent("YARN", "RESOURCEMANAGER", services, hosts)
+
+        if len(rmHosts) > 1:
+          rmHostsNameList = []
+          for rmHost in rmHosts:
+            rmHostsNameList.append(rmHost["Hosts"]["host_name"])
+          rmHostsNames = ",".join(rmHostsNameList)
+          if not yarn_user in users and yarn_user is not None:
+            users[yarn_user] = {"propertyHosts" : rmHostsNames, "config" : "yarn-env", "propertyName" : "yarn_user"}
+
+
+    if "FALCON" in servicesList:
+      falconUser = None
+      if "falcon-env" in services["configurations"] and "falcon_user" in services["configurations"]["falcon-env"]["properties"]:
+        falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
+        if not falconUser in users and falconUser is not None:
+          users[falconUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "falcon-env", "propertyName" : "falcon_user"}
+
+    if "SPARK" in servicesList:
+      livyUser = None
+      if "livy-env" in services["configurations"] and "livy_user" in services["configurations"]["livy-env"]["properties"]:
+        livyUser = services["configurations"]["livy-env"]["properties"]["livy_user"]
+        if not livyUser in users and livyUser is not None:
+          users[livyUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "livy-env", "propertyName" : "livy_user"}
+
+    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
+
+    for user_name, user_properties in users.iteritems():
+      if hive_user and hive_user == user_name:
+        if "propertyHosts" in user_properties:
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(hive_user)})
+      # Add properties "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" to core-site for all users
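+      # Illustrative outcome (hostnames assumed): for a hive_user of "hive" with HiveServer2 on
+      # hosts c6401.example.com and c6402.example.com, this writes
+      #   hadoop.proxyuser.hive.hosts  = c6401.example.com,c6402.example.com
+      #   hadoop.proxyuser.hive.groups = *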
+      putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(user_name) , user_properties["propertyHosts"])
+      Logger.info("Updated hadoop.proxyuser.{0}.hosts to: {1}".format(user_name, user_properties["propertyHosts"]))
+      if "propertyGroups" in user_properties:
+        putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(user_name) , user_properties["propertyGroups"])
+
+      # Remove old properties if user was renamed
+      userOldValue = getOldValue(self, services, user_properties["config"], user_properties["propertyName"])
+      if userOldValue is not None and userOldValue != user_name:
+        putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true')
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)})
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(user_name)})
+
+        if "propertyGroups" in user_properties:
+          putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true')
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(userOldValue)})
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(user_name)})
+
+    self.recommendAmbariProxyUsersForHDFS(services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute)
+
+  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHDFSSiteProperty = self.putProperty(configurations, "hdfs-site", services)
+    putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, "hdfs-site")
+    putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+    putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
+    putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+
+    # Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address
+    hdfsSiteProperties = getServicesSiteProperties(services, "hdfs-site")
+    nameServices = None
+    if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:
+      nameServices = hdfsSiteProperties['dfs.internal.nameservices']
+    if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
+      nameServices = hdfsSiteProperties['dfs.nameservices']
+    if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties:
+      namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices]
+      if len(namenodes.split(',')) > 1:
+        putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
+
+    #Initialize default 'dfs.datanode.data.dir' if needed
+    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
+      # keep dataDirs as a list so the loop below iterates directories, not characters
+      dataDirs = ['/hadoop/hdfs/data']
+      putHDFSSiteProperty('dfs.datanode.data.dir', ','.join(dataDirs))
+    else:
+      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+
+    # dfs.datanode.du.reserved should be set to 10-15% of volume size
+    # For each host selects maximum size of the volume. Then gets minimum for all hosts.
+    # This ensures that each host will have at least one data dir with available space.
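+    # Illustrative example (volume sizes assumed): host A has 500 GB and 1 TB volumes -> max 1 TB;
+    # host B has a single 200 GB volume -> max 200 GB; the minimum across hosts is 200 GB, and the
+    # code below reserves 1/8 of that (25 GB), but never less than 1 GB.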
+    reservedSizeRecommendation = 0l #kBytes
+    for host in hosts["items"]:
+      mountPoints = []
+      mountPointDiskAvailableSpace = [] #kBytes
+      for diskInfo in host["Hosts"]["disk_info"]:
+        mountPoints.append(diskInfo["mountpoint"])
+        mountPointDiskAvailableSpace.append(long(diskInfo["size"]))
+
+      maxFreeVolumeSizeForHost = 0l #kBytes
+      for dataDir in dataDirs:
+        mp = getMountPointForDir(dataDir, mountPoints)
+        for i in range(len(mountPoints)):
+          if mp == mountPoints[i]:
+            if mountPointDiskAvailableSpace[i] > maxFreeVolumeSizeForHost:
+              maxFreeVolumeSizeForHost = mountPointDiskAvailableSpace[i]
+
+      if not reservedSizeRecommendation or (maxFreeVolumeSizeForHost and maxFreeVolumeSizeForHost < reservedSizeRecommendation):
+        reservedSizeRecommendation = maxFreeVolumeSizeForHost
+
+    if reservedSizeRecommendation:
+      reservedSizeRecommendation = max(reservedSizeRecommendation * 1024 / 8, 1073741824) # At least 1Gb is reserved
+      putHDFSSiteProperty('dfs.datanode.du.reserved', reservedSizeRecommendation) #Bytes
+
+    # recommendations for "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" properties in core-site
+    self.recommendHadoopProxyUsers(configurations, services, hosts)
+
+  def recommendHbaseConfigurations(self, configurations, clusterData, services, hosts):
+    # recommendations for HBase env config
+
+    # If cluster size is < 100, hbase master heap = 2G
+    # else If cluster size is < 500, hbase master heap = 4G
+    # else hbase master heap = 8G
+    # for small test clusters use 1 gb
+    hostsCount = 0
+    if hosts and "items" in hosts:
+      hostsCount = len(hosts["items"])
+
+    hbaseMasterRam = {
+      hostsCount < 20: 1,
+      20 <= hostsCount < 100: 2,
+      100 <= hostsCount < 500: 4,
+      500 <= hostsCount: 8
+    }[True]
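+    # Note: the dict above is keyed by boolean expressions; exactly one of them is True for a given
+    # hostsCount, so indexing with [True] selects the matching master heap size tier (in GB).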
+
+    putHbaseProperty = self.putProperty(configurations, "hbase-env", services)
+    putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)
+    putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)
+
+    # recommendations for HBase site config
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+
+    if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \
+      and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \
+      and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
+      putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
+
+
+  def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
+
+    putRangerAdminProperty = self.putProperty(configurations, "admin-properties", services)
+
+    # Build policymgr_external_url
+    protocol = 'http'
+    ranger_admin_host = 'localhost'
+    port = '6080'
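+    # The resulting URL has the form <protocol>://<ranger_admin_host>:<port>; with the defaults
+    # above that is http://localhost:6080, refined below from the Ranger Admin host and site configs.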
+
+    # Check if http is disabled. For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled
+    # For Ranger-0.4.0 this can be checked in ranger-site/http.enabled
+    if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \
+      and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \
+      ('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \
+      and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):
+      # HTTPS protocol is used
+      protocol = 'https'
+      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.https.port
+      if 'ranger-admin-site' in services['configurations'] and \
+          'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']
+      # In Ranger-0.4.0 port stored in ranger-site https.service.port
+      elif 'ranger-site' in services['configurations'] and \
+          'https.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['https.service.port']
+    else:
+      # HTTP protocol is used
+      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.http.port
+      if 'ranger-admin-site' in services['configurations'] and \
+          'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']
+      # In Ranger-0.4.0 port stored in ranger-site http.service.port
+      elif 'ranger-site' in services['configurations'] and \
+          'http.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['http.service.port']
+
+    ranger_admin_hosts = self.getComponentHostNames(services, "RANGER", "RANGER_ADMIN")
+    if ranger_admin_hosts:
+      if len(ranger_admin_hosts) > 1 \
+        and services['configurations'] \
+        and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():
+
+        # in case of HA deployment keep the policymgr_external_url specified in the config
+        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
+      else:
+
+        ranger_admin_host = ranger_admin_hosts[0]
+        policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
+
+      putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
+
+    rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+    if rangerServiceVersion == '0.4.0':
+      # Recommend ldap settings based on ambari.properties configuration
+      # If 'ambari.ldap.isConfigured' == true
+      # For Ranger version 0.4.0
+      if 'ambari-server-properties' in services and \
+      'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+        putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
+        serverProperties = services['ambari-server-properties']
+        if 'authentication.ldap.managerDn' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
+        if 'authentication.ldap.primaryUrl' in serverProperties:
+          ldap_protocol =  'ldap://'
+          if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
+            ldap_protocol =  'ldaps://'
+          ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
+          putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)
+        if 'authentication.ldap.userObjectClass' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
+        if 'authentication.ldap.usernameAttribute' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
+
+
+      # Set Ranger Admin Authentication method
+      if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \
+          'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:
+        rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']
+        authenticationMethod = rangerUserSyncSource.upper()
+        if authenticationMethod != 'FILE':
+          putRangerAdminProperty('authentication_method', authenticationMethod)
+
+      # Recommend xasecure.audit.destination.hdfs.dir
+      # For Ranger version 0.4.0
+      servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+      putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+      include_hdfs = "HDFS" in servicesList
+      if include_hdfs:
+        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+          default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+          default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'
+          putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)
+
+      # Recommend Ranger Audit properties for ranger supported services
+      # For Ranger version 0.4.0
+      ranger_services = [
+        {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},
+        {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},
+        {'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},
+        {'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},
+        {'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}
+      ]
+
+      for item in range(len(ranger_services)):
+        if ranger_services[item]['service_name'] in servicesList:
+          component_audit_file =  ranger_services[item]['audit_file']
+          if component_audit_file in services["configurations"]:
+            ranger_audit_dict = [
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}
+            ]
+            putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+            for item in ranger_audit_dict:
+              if item['filename'] in services["configurations"] and item['configname'] in  services["configurations"][item['filename']]["properties"]:
+                if item['filename'] in configurations and item['configname'] in  configurations[item['filename']]["properties"]:
+                  rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
+                else:
+                  rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
+                putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
+
+
+  def getAmsMemoryRecommendation(self, services, hosts):
+    # MB per sink in hbase heapsize
+    HEAP_PER_MASTER_COMPONENT = 50
+    HEAP_PER_SLAVE_COMPONENT = 10
+
+    schMemoryMap = {
+      "HDFS": {
+        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "DATANODE": HEAP_PER_SLAVE_COMPONENT
+      },
+      "YARN": {
+        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
+      },
+      "HBASE": {
+        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "ACCUMULO": {
+        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
+      },
+      "FLUME": {
+        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "STORM": {
+        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
+      },
+      "AMBARI_METRICS": {
+        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
+        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
+      }
+    }
+    total_sinks_count = 0
+    # minimum heap size
+    hbase_heapsize = 500
+    for serviceName, componentsDict in schMemoryMap.items():
+      for componentName, multiplier in componentsDict.items():
+        schCount = len(
+          self.getHostsWithComponent(serviceName, componentName, services,
+                                     hosts))
+        hbase_heapsize += int((schCount * multiplier) ** 0.9)
+        total_sinks_count += schCount
+    collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)
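+    # Rough illustration (component counts assumed): 1 NAMENODE plus 100 DATANODE sinks give
+    # hbase_heapsize ~= 500 + int(50**0.9) + int(1000**0.9) = 500 + 33 + 501 = 1034 MB, which is
+    # below 2048, so collector_heapsize stays at 512 before round_to_n is applied.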
+
+    return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
+
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
+
+  def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
+    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
+    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
+    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
+    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
+    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
+    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+
+    if 'cluster-env' in services['configurations'] and \
+        'metrics_collector_vip_host' in services['configurations']['cluster-env']['properties']:
+      metric_collector_host = services['configurations']['cluster-env']['properties']['metrics_collector_vip_host']
+    else:
+      metric_collector_host = 'localhost' if len(amsCollectorHosts) == 0 else amsCollectorHosts[0]
+
+    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(metric_collector_host) + ":6188")
+
+    log_dir = "/var/log/ambari-metrics-collector"
+    if "ams-env" in services["configurations"]:
+      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
+        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
+      putHbaseEnvProperty("hbase_log_dir", log_dir)
+
+    defaultFs = 'file:///'
+    if "core-site" in services["configurations"] and \
+      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
+      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
+
+    operatingMode = "embedded"
+    if "ams-site" in services["configurations"]:
+      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
+        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
+
+    if operatingMode == "distributed":
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
+    else:
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
+
+    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
+    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
+    zk_port_default = []
+    if "ams-hbase-site" in services["configurations"]:
+      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
+      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
+      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
+        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
+
+    # Skip recommendation item if default value is present
+    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      zkPort = self.getZKPort(services)
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
+    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
+
+    mountpoints = ["/"]
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          mountpoints = self.getPreferredMountPoints(host["Hosts"])
+          break
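+    # If hbase.rootdir is on the local filesystem, rebase it onto the collector host's most
+    # preferred mount point; hbase.tmp.dir is placed on the second-best mount point when one exists.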
+    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
+    if isLocalRootDir:
+      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
+      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
+    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
+    if len(mountpoints) > 1 and isLocalRootDir:
+      tmpDir = os.path.join(mountpoints[1], tmpDir)
+    else:
+      tmpDir = os.path.join(mountpoints[0], tmpDir)
+    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+
+    if operatingMode == "distributed":
+      putAmsHbaseSiteProperty("hbase.rootdir", defaultFs + "/user/ams/hbase")
+
+    if operatingMode == "embedded":
+      if isLocalRootDir:
+        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
+      else:
+        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
+
+    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
+
+    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
+    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
+    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
+
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
+      if total_sinks_count >= 2000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
+        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
+        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
+      elif total_sinks_count >= 500:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
+      else:
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
+      pass
+
+    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
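+    # e.g. (illustrative) 3000 sinks -> min(50, max(20, 30)) = 30 handler threads.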
+    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
+
+    # Distributed mode heap size
+    if operatingMode == "distributed":
+      hbase_heapsize = max(hbase_heapsize, 768)
+      putHbaseEnvProperty("hbase_master_heapsize", "512")
+      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15*hbase_heapsize,64))
+    else:
+      # Embedded mode heap size : master + regionserver
+      hbase_rs_heapsize = 768
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
+      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))
+
+    # If no local DN in distributed mode
+    if operatingMode == "distributed":
+      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+      # call by Kerberos wizard sends only the service being affected
+      # so it is possible for dn_hosts to be None but not amsCollectorHosts
+      if dn_hosts and len(dn_hosts) > 0:
+        if set(amsCollectorHosts).intersection(dn_hosts):
+          collector_cohosted_with_dn = "true"
+        else:
+          collector_cohosted_with_dn = "false"
+        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
+
+    #split points
+    scriptDir = os.path.dirname(os.path.abspath(__file__))
+    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
+    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+    sys.path.append(os.path.join(metricsDir, 'scripts'))
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    from split_points import FindSplitPointsForAMSRegions
+
+    ams_hbase_site = None
+    ams_hbase_env = None
+
+    # Overridden properties from the UI
+    if "ams-hbase-site" in services["configurations"]:
+      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
+    if "ams-hbase-env" in services["configurations"]:
+      ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
+
+    # Recommendations
+    if not ams_hbase_site:
+      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
+    if not ams_hbase_env:
+      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
+
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, operatingMode, servicesList)
+
+    result = split_point_finder.get_split_points()
+    precision_splits = ' '
+    aggregate_splits = ' '
+    if result.precision:
+      precision_splits = result.precision
+    if result.aggregate:
+      aggregate_splits = result.aggregate
+    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
+    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
+
+    component_grafana_exists = False
+    for service in services['services']:
+      if 'components' in service:
+        for component in service['components']:
+          if 'StackServiceComponents' in component:
+            # If Grafana is installed the hostnames would indicate its location
+            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
+              len(component['StackServiceComponents']['hostnames']) != 0:
+              component_grafana_exists = True
+              break
+    pass
+
+    if not component_grafana_exists:
+      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
+
+    pass
+
+  def getHostNamesWithComponent(self, serviceName, componentName, services):
+    """
+    Returns the list of hostnames on which the given service component is installed
+    """
+    if services is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
+      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
+      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
+      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
+        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
+        return componentHostnames
+    return []
+
+  def getHostsWithComponent(self, serviceName, componentName, services, hosts):
+    if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
+      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
+      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
+      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
+        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
+        componentHosts = [host for host in hosts["items"] if host["Hosts"]["host_name"] in componentHostnames]
+        return componentHosts
+    return []
+
+  def getHostWithComponent(self, serviceName, componentName, services, hosts):
+    componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)
+    if (len(componentHosts) > 0):
+      return componentHosts[0]
+    return None
+
+  def getHostComponentsByCategories(self, hostname, categories, services, hosts):
+    components = []
+    if services is not None and hosts is not None:
+      for service in services["services"]:
+          components.extend([componentEntry for componentEntry in service["components"]
+                              if componentEntry["StackServiceComponents"]["component_category"] in categories
+                              and hostname in componentEntry["StackServiceComponents"]["hostnames"]])
+    return components
+
+  def getZKHostPortString(self, services, include_port=True):
+    """
+    Returns the comma-delimited string of ZooKeeper server hosts with the configured port, as installed in the cluster
+    Example: zk.host1.org:2181,zk.host2.org:2181,zk.host3.org:2181
+    include_port: boolean param -> whether the port should be appended to each host.
+    """
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    include_zookeeper = "ZOOKEEPER" in servicesList
+    zookeeper_host_port = ''
+
+    if include_zookeeper:
+      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
+      zookeeper_host_port_arr = []
+
+      if include_port:
+        zookeeper_port = self.getZKPort(services)
+        for i in range(len(zookeeper_hosts)):
+          zookeeper_host_port_arr.append(zookeeper_hosts[i] + ':' + zookeeper_port)
+      else:
+        for i in range(len(zookeeper_hosts)):
+          zookeeper_host_port_arr.append(zookeeper_hosts[i])
+
+      zookeeper_host_port = ",".join(zookeeper_host_port_arr)
+    return zookeeper_host_port
+
+  def getZKPort(self, services):
+    zookeeper_port = '2181'     #default port
+    if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):
+      zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']
+    return zookeeper_port
+
+  def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
+
+    hBaseInstalled = False
+    if 'HBASE' in servicesList:
+      hBaseInstalled = True
+
+    cluster = {
+      "cpu": 0,
+      "disk": 0,
+      "ram": 0,
+      "hBaseInstalled": hBaseInstalled,
+      "components": components
+    }
+
+    if len(hosts["items"]) > 0:
+      nodeManagerHosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
+      # The NodeManager host with the least memory is used as the reference, since recommendations that fit it will also fit larger hosts.
+      if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:
+        nodeManagerHost = nodeManagerHosts[0]
+        for nmHost in nodeManagerHosts:
+          if nmHost["Hosts"]["total_mem"] < nodeManagerHost["Hosts"]["total_mem"]:
+            nodeManagerHost = nmHost
+        host = nodeManagerHost["Hosts"]
+        cluster["referenceNodeManagerHost"] = host
+      else:
+        host = hosts["items"][0]["Hosts"]
+      cluster["referenceHost"] = host
+      cluster["cpu"] = host["cpu_count"]
+      cluster["disk"] = len(host["disk_info"])
+      cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
+
+    ramRecommendations = [
+      {"os":1, "hbase":1},
+      {"os":2, "hbase":1},
+      {"os":2, "hbase":2},
+      {"os":4, "hbase":4},
+      {"os":6, "hbase":8},
+      {"os":8, "hbase":8},
+      {"os":8, "hbase":8},
+      {"os":12, "hbase":16},
+      {"os":24, "hbase":24},
+      {"os":32, "hbase":32},
+      {"os":64, "hbase":32}
+    ]
+    index = {
+      cluster["ram"] <= 4: 0,
+      4 < cluster["ram"] <= 8: 1,
+      8 < cluster["ram"] <= 16: 2,
+      16 < cluster["ram"] <= 24: 3,
+      24 < cluster["ram"] <= 48: 4,
+      48 < cluster["ram"] <= 64: 5,
+      64 < cluster["ram"] <= 72: 6,
+      72 < cluster["ram"] <= 96: 7,
+      96 < cluster["ram"] <= 128: 8,
+      128 < cluster["ram"] <= 256: 9,
+      256 < cluster["ram"]: 10
+    }[1]
+
+
+    cluster["reservedRam"] = ramRecommendations[index]["os"]
+    cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
+
+
+    cluster["minContainerSize"] = {
+      cluster["ram"] <= 4: 256,
+      4 < cluster["ram"] <= 8: 512,
+      8 < cluster["ram"] <= 24: 1024,
+      24 < cluster["ram"]: 2048
+    }[1]
+
+    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
+    if cluster["hBaseInstalled"]:
+      totalAvailableRam -= cluster["hbaseRam"]
+    cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
+    '''containers = max(3, min(2*cores, min(1.8*DISKS, (Total available RAM) / MIN_CONTAINER_SIZE)))'''
+    cluster["containers"] = round(max(3,
+                                min(2 * cluster["cpu"],
+                                    min(ceil(1.8 * cluster["disk"]),
+                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
+
+    '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
+    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
+    '''If greater than 1GB, value will be in multiples of 512.'''
+    if cluster["ramPerContainer"] > 1024:
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
+
+    cluster["mapMemory"] = int(cluster["ramPerContainer"])
+    cluster["reduceMemory"] = cluster["ramPerContainer"]
+    cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
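+    # Worked example (host specs assumed): 64 GB RAM, 16 cores, 8 disks, HBase installed ->
+    # reservedRam = 8, hbaseRam = 8, totalAvailableRam = 48 * 1024 = 49152 MB, minContainerSize = 2048,
+    # containers = round(max(3, min(32, min(ceil(14.4), 24)))) = 15, ramPerContainer = 49152 / 15.0 = 3276.8,
+    # which is > 1024 and so is rounded down to a multiple of 512, giving 3072 MB for map/reduce/AM memory.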
+
+    return cluster
+
+  def getServiceConfigurationValidators(self):
+    return {
+      "HDFS": { "hdfs-site": self.validateHDFSConfigurations,
+                "hadoop-env": self.validateHDFSConfigurationsEnv},
+      "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
+      "YARN": {"yarn-site": self.validateYARNConfigurations,
+               "yarn-env": self.validateYARNEnvConfigurations},
+      "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
+      "STORM": {"storm-site": self.validateStormConfigurations},
+      "AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
+              "ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
+              "ams-site": self.validateAmsSiteConfigurations}
+    }
+
+  def validateMinMax(self, items, recommendedDefaults, configurations):
+
+    # required for casting to the proper numeric type before comparison
+    def convertToNumber(number):
+      try:
+        return int(number)
+      except ValueError:
+        return float(number)
+
+    for configName in configurations:
+      validationItems = []
+      if configName in recommendedDefaults and "property_attributes" in recommendedDefaults[configName]:
+        for propertyName in recommendedDefaults[configName]["property_attributes"]:
+          if propertyName in configurations[configName]["properties"]:
+            if "maximum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
+                propertyName in recommendedDefaults[configName]["properties"]:
+              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
+              maxValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["maximum"])
+              if userValue > maxValue:
+                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is greater than the recommended maximum of {0} ".format(maxValue))}])
+            if "minimum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
+                    propertyName in recommendedDefaults[configName]["properties"]:
+              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
+              minValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["minimum"])
+              if userValue < minValue:
+                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is less than the recommended minimum of {0} ".format(minValue))}])
+      items.extend(self.toConfigurationValidationProblems(validationItems, configName))
+    pass
+
+  def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    op_mode = properties.get("timeline.metrics.service.operation.mode")
+    correct_op_mode_item = None
+    if op_mode not in ("embedded", "distributed"):
+      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
+      pass
+
+    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
+
+  def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+    ams_site = getSiteProperties(configurations, "ams-site")
+    core_site = getSiteProperties(configurations, "core-site")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
+    recommendedDiskSpace = 10485760
+    # TODO validate configuration for multiple AMBARI_METRICS collectors
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      if total_sinks_count > 2000:
+        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
+      elif total_sinks_count > 500:
+        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
+      elif total_sinks_count > 250:
+        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
+
+    validationItems = []
+
+    rootdir_item = None
+    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
+    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
+    hbase_rootdir = properties.get("hbase.rootdir")
+    hbase_tmpdir = properties.get("hbase.tmp.dir")
+    distributed = properties.get("hbase.cluster.distributed")
+    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
+
+    if op_mode == "distributed" and is_local_root_dir:
+      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
+    elif op_mode == "embedded":
+      if (distributed.lower() == "false" and hbase_rootdir.startswith('/')) or hbase_rootdir.startswith("hdfs://"):
+        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir should not be a schemeless path or point to HDFS; "
+                                        "use a file:// URI for the local filesystem instead.")
+      pass
+
+    distributed_item = None
+    if op_mode == "distributed" and not distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
+                                           "distributed mode")
+    if op_mode == "embedded" and distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
+
+    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
+    zkPort = self.getZKPort(services)
+    hbase_zk_client_port_item = None
+    if distributed.lower() == "true" and op_mode == "distributed" and \
+        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
+                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
+
+    if distributed.lower() == "false" and op_mode == "embedded" and \
+        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
+                                                    "should be a different port than cluster zookeeper port."
+                                                    "(default:61181)")
+
+    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
+                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
+                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
+
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          if op_mode == 'embedded' or is_local_root_dir:
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
+            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
+
+          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+          if is_local_root_dir:
+            mountPoints = []
+            for mountPoint in host["Hosts"]["disk_info"]:
+              mountPoints.append(mountPoint["mountpoint"])
+            hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)
+            hbase_tmpdir_mountpoint = getMountPointForDir(hbase_tmpdir, mountPoints)
+            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
+            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
+            # if multiple preferred_mountpoints exist
+            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
+              len(preferred_mountpoints) > 1:
+              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
+                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
+              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
+
+            # if METRICS_COLLECTOR is co-hosted with DATANODE
+            # cross-check dfs.datanode.data.dir and hbase.rootdir
+            # they shouldn't share same disk partition IO
+            hdfs_site = getSiteProperties(configurations, "hdfs-site")
+            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
+            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
+              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
+              for dfs_datadir in dfs_datadirs:
+                dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)
+                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
+                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
+                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
+                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
+                  break
+          # If no local DN in distributed mode
+          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
+            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
+                                    "to speed up IO operations between HDFS and Metrics "
+                                    "Collector in distributed mode ".format(collectorHostName))
+            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
+          # Short-circuit read should be enabled in distributed mode
+          # if local DN installed
+          else:
+            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
+
+  def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
+      "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
+
+      validationItems.append({"config-name": 'metrics.reporter.register',
+                              "item": self.getWarnItem(
+                                "Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
+
+  def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    ams_env = getSiteProperties(configurations, "ams-env")
+    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
+    validationItems = []
+    mb = 1024 * 1024
+    gb = 1024 * mb
+
+    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
+    if regionServerItem:
+      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
+    if hbaseMasterHeapsizeItem:
+      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
+    if logDirItem:
+      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
+
+    collector_heapsize = to_number(ams_env.get("metrics_collector_heapsize"))
+    hbase_master_heapsize = to_number(properties["hbase_master_heapsize"])
+    hbase_master_xmn_size = to_number(properties["hbase_master_xmn_size"])
+    hbase_regionserver_heapsize = to_number(properties["hbase_regionserver_heapsize"])
+    hbase_regionserver_xmn_size = to_number(properties["regionserver_xmn_size"])
+
+    # Validate Xmn settings.
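+    # Illustrative bounds: in distributed mode with hbase_master_heapsize = 1024 MB, the acceptable
+    # master Xmn range checked below is roughly 123-204 MB (12%-20% of the heap).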
+    masterXmnItem = None
+    regionServerXmnItem = None
+    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
+
+    if is_hbase_distributed:
+      minMasterXmn = 0.12 * hbase_master_heapsize
+      maxMasterXmn = 0.2 * hbase_master_heapsize
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
+
+      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
+      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
+      if hbase_regionserver_xmn_size < minRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
+                                               "(12% of hbase_regionserver_heapsize)"
+                                               .format(int(ceil(minRegionServerXmn))))
+
+      if hbase_regionserver_xmn_size > maxRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                               "(20% of hbase_regionserver_heapsize)"
+                                               .format(int(floor(maxRegionServerXmn))))
+    else:
+      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
+      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is less than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(floor(maxMasterXmn))))
+    if masterXmnItem:
+      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
+
+    if regionServerXmnItem:
+      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
+
+    if hbaseMasterHeapsizeItem is None:
+      hostMasterComponents = {}
+
+      for service in services["services"]:
+        for component in service["components"]:
+          if component["StackServiceComponents"]["hostnames"] is not None:
+            for hostName in component["StackServiceComponents"]["hostnames"]:
+              if self.isMasterComponent(component):
+                if hostName not in hostMasterComponents.keys():
+                  hostMasterComponents[hostName] = []
+                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
+
+      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+      for collectorHostName in amsCollectorHosts:
+        for host in hosts["items"]:
+          if host["Hosts"]["host_name"] == collectorHostName:
+            # AMS Collector co-hosted with other master components in bigger clusters
+            if len(hosts['items']) > 31 and \
+                            len(hostMasterComponents[collectorHostName]) > 2 and \
+                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
+              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
+                                  "It is recommended to use a separate host for the " \
+                                  "Ambari Metrics Collector component and ensure " \
+                                  "the host has sufficient memory available."
+
+              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
+                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
+              if hbaseMasterHeapsizeItem:
+                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+            # Check for unused RAM on AMS Collector node
+            hostComponents = []
+            for service in services["services"]:
+              for component in service["components"]:
+                if component["StackServiceComponents"]["hostnames"] is not None:
+                  if collectorHostName in component["StackServiceComponents"]["hostnames"]:
+                    hostComponents.append(component["StackServiceComponents"]["component_name"])
+
+            requiredMemory = getMemorySizeRequired(hostComponents, configurations)
+            unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
+            if unusedMemory > 4*gb:  # warn user, if more than 4GB RAM is unused
+              heapPropertyToIncrease = "hbase_regionserver_heapsize" if is_hbase_distributed else "hbase_master_heapsize"
+              xmnPropertyToIncrease = "regionserver_xmn_size" if is_hbase_distributed else "hbase_master_xmn_size"
+              recommended_collector_heapsize = int((unusedMemory - 4*gb)/5) + collector_heapsize*mb
+              recommended_hbase_heapsize = int((unusedMemory - 4*gb)*4/5) + to_number(properties.get(heapPropertyToIncrease))*mb
+              recommended_hbase_heapsize = min(32*gb, recommended_hbase_heapsize) #Make sure heapsize <= 32GB
+              recommended_xmn_size = round_to_n(0.12*recommended_hbase_heapsize/mb,128)
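+              # Illustrative split (unused memory assumed): with 14 GB unused, the 10 GB above the
+              # 4 GB cushion is divided 1/5 to the collector heap and 4/5 to the HBase heap (capped
+              # at 32 GB), with Xmn set to ~12% of that heap via round_to_n(..., 128).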
+
+              if collector_heapsize < recommended_collector_heapsize or \
+                  to_number(properties[heapPropertyToIncrease]) < recommended_hbase_heapsize:
+                collectorHeapsizeItem = self.getWarnItem("{0} MB RAM is unused on the host {1} based on the components " \
+                                                         "assigned. Consider allocating {2} MB to " \
+                                                         "metrics_collector_heapsize in ams-env and " \
+                                                         "{3} MB to {4} in ams-hbase-env"
+                                                         .format(unusedMemory/mb, collectorHostName,
+                                                                 recommended_collector_heapsize/mb,
+                                                                 recommended_hbase_heapsize/mb,
+                                                                 heapPropertyToIncrease))
+                validationItems.extend([{"config-name": heapPropertyToIncrease, "item": collectorHeapsizeItem}])
+
+              if to_number(properties[xmnPropertyToIncrease]) < recommended_xmn_size:
+                xmnPropertyToIncreaseItem = self.getWarnItem("Consider allocating {0} MB to use some of the unused memory "
+                                                             "on the host".format(recommended_xmn_size))
+                validationItems.extend([{"config-name": xmnPropertyToIncrease, "item": xmnPropertyToIncreaseItem}])
+      pass
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
+
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
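+    # "/" is always appended as a last-resort fallback, so the result is ordered by available space with the
+    # root partition last, e.g. ["/grid/0", "/grid/1", "/"] (illustrative mount points)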
+    return mountPoints
+
+  def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    dir = properties[propertyName]
+    if not dir.startswith("file://") or dir == recommendedDefaults.get(propertyName):
+      return None
+
+    dir = re.sub("^file://", "", dir, count=1)
+    mountPoints = []
+    for mountPoint in hostInfo["disk_info"]:
+      mountPoints.append(mountPoint["mountpoint"])
+    mountPoint = getMountPointForDir(dir, mountPoints)
+
+    if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:
+      return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName))
+
+    return None
+
+  def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, requiredDiskSpace):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    dir = properties[propertyName]
+    if not dir.startswith("file://"):
+      return None
+
+    dir = re.sub("^file://", "", dir, count=1)
+    mountPoints = {}
+    for mountPoint in hostInfo["disk_info"]:
+      mountPoints[mountPoint["mountpoint"]] = to_number(mountPoint["available"])
+    if not mountPoints:
+      return self.getErrorItem("No disk info found on host %s" % hostInfo["host_name"])
+
+    mountPoint = getMountPointForDir(dir, mountPoints.keys())
+
+    if mountPoints[mountPoint] < requiredDiskSpace:
+      msg = "Ambari Metrics disk space requirements not met. \n" \
+            "Recommended disk space for partition {0} is {1}G"
+      return self.getWarnItem(msg.format(mountPoint, requiredDiskSpace/1048576)) # "available" is reported in KB, so /1048576 converts to GB
+    return None
+
+  def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
+    if propertyName not in recommendedDefaults:
+      # If a property name exists in, say, both hbase-env and hbase-site (which is allowed), then it will exist in the
+      # "properties" dictionary, but not necessarily in the "recommendedDefaults" dictionary. In this case, ignore it.
+      return None
+
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    value = to_number(properties[propertyName])
+    if value is None:
+      return self.getErrorItem("Value should be integer")
+    defaultValue = to_number(recommendedDefaults[propertyName])
+    if defaultValue is None:
+      return None
+    if value < defaultValue:
+      return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
+    return None
+
+  def validatorEqualsPropertyItem(self, properties1, propertyName1,
+                                  properties2, propertyName2,
+                                  emptyAllowed=False):
+    if not propertyName1 in properties1:
+      return self.getErrorItem("Value should be set for %s" % propertyName1)
+    if not propertyName2 in properties2:
+      return self.getErrorItem("Value should be set for %s" % propertyName2)
+    value1 = properties1.get(propertyName1)
+    if value1 is None and not emptyAllowed:
+      return self.getErrorItem("Empty value for %s" % propertyName1)
+    value2 = properties2.get(propertyName2)
+    if value2 is None and not emptyAllowed:
+      return self.getErrorItem("Empty value for %s" % propertyName2)
+    if value1 != value2:
+      return self.getWarnItem("It is recommended to set equal values "
+             "for properties {0} and {1}".format(propertyName1, propertyName2))
+
+    return None
+
+  def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults,
+                                       propertyName):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set for %s" % propertyName)
+    value = properties.get(propertyName)
+    if not propertyName in recommendedDefaults:
+      return self.getErrorItem("Value should be recommended for %s" % propertyName)
+    recommendedValue = recommendedDefaults.get(propertyName)
+    if value != recommendedValue:
+      return self.getWarnItem("It is recommended to set value {0} "
+             "for property {1}".format(recommendedValue, propertyName))
+    return None
+
+  def validateMinMemorySetting(self, properties, defaultValue, propertyName):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    if defaultValue is None:
+      return self.getErrorItem("Config's default value can't be null or undefined")
+
+    value = properties[propertyName]
+    if value is None:
+      return self.getErrorItem("Value can't be null or undefined")
+    try:
+      valueInt = to_number(value)
+      # TODO: generify for other use cases
+      defaultValueInt = int(str(defaultValue).strip())
+      if valueInt < defaultValueInt:
+        return self.getWarnItem("Value is less than the minimum recommended default of -Xmx" + str(defaultValue))
+    except:
+      return None
+
+    return None
+
+  def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):
+    if propertyName not in properties:
+      return self.getErrorItem("Value should be set")
+
+    capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
+    leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)
+    queue_name = properties[propertyName]
+
+    if len(leaf_queue_names) == 0:
+      return None
+    elif queue_name not in leaf_queue_names:
+      return self.getErrorItem("Queue does not exist or does not correspond to an existing YARN leaf queue")
+
+    return None
+
+  def recommendYarnQueue(self, services, catalog_name=None, queue_property=None):
+    old_queue_name = None
+
+    if services and 'configurations' in services:
+      configurations = services["configurations"]
+      if catalog_name in configurations and queue_property in configurations[catalog_name]["properties"]:
+        old_queue_name = configurations[catalog_name]["properties"][queue_property]
+
+      capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
+      leaf_queues = sorted(self.getAllYarnLeafQueues(capacity_scheduler_properties))
+
+      if leaf_queues and (old_queue_name is None or old_queue_name not in leaf_queues):
+        return leaf_queues.pop()
+      elif old_queue_name and old_queue_name in leaf_queues:
+        return None
+
+    return "default"
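+  # Illustrative trace (hypothetical queue names): with leaf queues ["default", "llap"] and a stored value of
+  # "batch", recommendYarnQueue returns "llap" (the last leaf queue after sorting); if the stored value is
+  # already a valid leaf queue, it returns None.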
+
+  def validateXmxValue(self, properties, recommendedDefaults, propertyName):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    value = properties[propertyName]
+    defaultValue = recommendedDefaults[propertyName]
+    if defaultValue is None:
+      return self.getErrorItem("Config's default value can't be null or undefined")
+    if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):
+      # The default value contains an -Xmx setting but the configured value does not; treat this as an error
+      return self.getErrorItem('Invalid value format')
+    if not checkXmxValueFormat(defaultValue):
+      # if default value does not contain Xmx, then there is no point in validating existing value
+      return None
+    valueInt = formatXmxSizeToBytes(getXmxSize(value))
+    defaultValueXmx = getXmxSize(defaultValue)
+    defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
+    if valueInt < defaultValueInt:
+      return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
+    return None
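+  # For example (illustrative values): a configured "-Xmx512m" against a recommended default of "-Xmx1024m"
+  # yields the "less than the recommended default" warning above, while a value without any -Xmx setting
+  # (when the default has one) is reported as "Invalid value format".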
+
+  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
+                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
+                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
+                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
+                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
+                        {"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
+    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
+
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    clusterEnv = getSiteProperties(configurations, "cluster-env")
+    validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
+                        {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
+                        {"config-name": 'yarn.nodemanager.linux-container-executor.group', "item": self.validatorEqualsPropertyItem(properties, "yarn.nodemanager.linux-container-executor.group", clusterEnv, "user_group")},
+                        {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
+    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
+  def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [{"config-name": 'service_check.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]
+    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
+
+  def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_site = getSiteProperties(configurations, "hbase-site")
+    validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
+                        {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
+                        {"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
+    return self.toConfigurationValidationProblems(validationItems, "hbase-env")
+
+  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    clusterEnv = getSiteProperties(configurations, "cluster-env")
+    validationItems = [{"config-name": 'dfs.datanode.du.reserved', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},
+                       {"config-name": 'dfs.datanode.data.dir', "item": self.validatorOneDataDirPerPartition(properties, 'dfs.datanode.data.dir', services, hosts, clusterEnv)}]
+    return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
+
+  def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
+                        {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
+                        {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
+    return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
+
+  def validatorOneDataDirPerPartition(self, properties, propertyName, services, hosts, clusterEnv):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    dirs = properties[propertyName]
+
+    if not (clusterEnv and "one_dir_per_partition" in clusterEnv and clusterEnv["one_dir_per_partition"].lower() == "true"):
+      return None
+
+    dataNodeHosts = self.getDataNodeHosts(services, hosts)
+
+    warnings = set()
+    for host in dataNodeHosts:
+      hostName = host["Hosts"]["host_name"]
+
+      mountPoints = []
+      for diskInfo in host["Hosts"]["disk_info"]:
+        mountPoints.append(diskInfo["mountpoint"])
+
+      if get_mounts_with_multiple_data_dirs(mountPoints, dirs):
+        # A detailed message can be too long on large clusters:
+        # warnings.append("Host: " + hostName + "; Mount: " + mountPoint + "; Data directories: " + ", ".join(dirList))
+        warnings.add(hostName)
+        break
+
+    if len(warnings) > 0:
+      return self.getWarnItem("cluster-env/one_dir_per_partition is ena

<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/bigtop/blob/b1d707c2/bigtop-packages/src/common/ambari/ambari-server.svc
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ambari-server.svc b/bigtop-packages/src/common/ambari/ambari-server.svc
new file mode 100644
index 0000000..e69de29


[06/52] bigtop git commit: Working around ODPI-186

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/490bcb65/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
new file mode 100755
index 0000000..f44e3b2
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/YARN/MAPREDUCE2_metrics.json
@@ -0,0 +1,2596 @@
+{
+  "HISTORYSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:type=Threading.AllThreadIds",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/LoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.LoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/UnloadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/TotalLoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/Verbose": {
+              "metric": "java.lang:type=ClassLoading.Verbose",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPath": {
+              "metric": "java.lang:type=Runtime.BootClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/LibraryPath": {
+              "metric": "java.lang:type=Runtime.LibraryPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmName": {
+              "metric": "java.lang:type=Runtime.VmName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVendor": {
+              "metric": "java.lang:type=Runtime.VmVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVersion": {
+              "metric": "java.lang:type=Runtime.VmVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPathSupported": {
+              "metric": "java.lang:type=Runtime.BootClassPathSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/InputArguments": {
+              "metric": "java.lang:type=Runtime.InputArguments",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ManagementSpecVersion": {
+              "metric": "java.lang:type=Runtime.ManagementSpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecName": {
+              "metric": "java.lang:type=Runtime.SpecName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVendor": {
+              "metric": "java.lang:type=Runtime.SpecVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVersion": {
+              "metric": "java.lang:type=Runtime.SpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SystemProperties": {
+              "metric": "java.lang:type=Runtime.SystemProperties",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Uptime": {
+              "metric": "java.lang:type=Runtime.Uptime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Name": {
+              "metric": "java.lang:type=Runtime.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ClassPath": {
+              "metric": "java.lang:type=Runtime.ClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/MaxFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/OpenFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/CommittedVirtualMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreePhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreeSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuTime": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalPhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/AvailableProcessors": {
+              "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Version": {
+              "metric": "java.lang:type=OperatingSystem.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Arch": {
+              "metric": "java.lang:type=OperatingSystem.Arch",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemLoadAverage": {
+              "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Name": {
+              "metric": "java.lang:type=OperatingSystem.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Count": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Name": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Count": {
+              "metric": "java.nio:type=BufferPool,name=direct.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Name": {
+              "metric": "java.nio:type=BufferPool,name=direct.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:type=Threading.AllThreadIds",
+              "pointInTi

<TRUNCATED>
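
Each JMX entry in the metrics.json above pairs an Ambari metrics path (for example "metrics/jvm/MemHeapUsedM") with an MBean attribute exposed by the daemon ("Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM"); "pointInTime": true means the value can be read live from JMX, while "temporal": true marks metrics served as time series from the Ganglia/metrics sink. As a minimal sketch (not part of the patch), such a mapping can be spot-checked against a running JobHistoryServer's /jmx servlet; the hostname and port are assumptions (19888 is the usual JobHistoryServer HTTP port), and the helper below is illustrative only.

# Minimal sketch: spot-check one simple JMX mapping from metrics.json against a
# live JobHistoryServer. Python 2, like the stack's package scripts; host/port
# are assumptions and must be adjusted for a real cluster.
import json
import urllib2

JHS_JMX = "http://historyserver.example.com:19888/jmx"  # assumed endpoint

def read_mbean_attribute(metric):
    """Split a metrics.json 'metric' string of the form '<ObjectName>.<Attribute>'
    and fetch the attribute via the daemon's JMXJsonServlet (?qry=<ObjectName>).
    Only handles plain attributes, not bracketed ones like HeapMemoryUsage[max]."""
    bean_name, attribute = metric.rsplit(".", 1)
    data = json.load(urllib2.urlopen("%s?qry=%s" % (JHS_JMX, bean_name)))
    beans = data.get("beans", [])
    return beans[0].get(attribute) if beans else None

if __name__ == "__main__":
    # Same metric string as the "metrics/jvm/MemHeapUsedM" entry above.
    print(read_mbean_attribute(
        "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM"))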

[29/52] bigtop git commit: Making ODPi Ambari stack compatible with ODPi packaging (getting rid of /usr/hdp)

Posted by rv...@apache.org.
Making ODPi Ambari stack compatible with ODPi packaging (getting rid of /usr/hdp)

(cherry picked from commit eb6a89809672ef41ec481176ac7bf732e4082243)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/7bd98d5a
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/7bd98d5a
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/7bd98d5a

Branch: refs/heads/master
Commit: 7bd98d5a2a28c3760b0436cb3b7ad825046e8c64
Parents: 4ded443
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Fri Oct 28 10:56:11 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:10 2017 -0700

----------------------------------------------------------------------
 .../ODPi/1.0/services/HIVE/package/scripts/params_linux.py    | 7 +++++--
 .../ODPi/1.0/services/HIVE/package/scripts/status_params.py   | 3 ++-
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/7bd98d5a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
index b33d715..9d79e12 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/params_linux.py
@@ -195,8 +195,11 @@ purge_tables = 'true'
 
 # this is NOT a typo.  Configs for hcatalog/webhcat point to a
 # specific directory which is NOT called 'conf'
-hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
-config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
+# FIXME: ODPi
+# hcat_conf_dir = format('{stack_root}/current/hive-webhcat/etc/hcatalog')
+# config_dir = format('{stack_root}/current/hive-webhcat/etc/webhcat')
+hcat_conf_dir = format('/etc/hive-hcatalog/conf')
+config_dir = format('/etc/hive-webhcat/conf')
 
 hive_metastore_site_supported = True
 

http://git-wip-us.apache.org/repos/asf/bigtop/blob/7bd98d5a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
index 024f3df..a7b2e3f 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/status_params.py
@@ -108,7 +108,8 @@ else:
   if check_stack_feature(StackFeature.HIVE_WEBHCAT_SPECIFIC_CONFIGS, stack_version_formatted_major):
     # this is NOT a typo. Configs for hcatalog/webhcat point to a
     # specific directory which is NOT called 'conf'
-    webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/etc/webhcat")
+    #  FIXME ODPi: webhcat_conf_dir = format("{stack_root}/current/hive-webhcat/etc/webhcat")
+    webhcat_conf_dir = format("/etc/hive-webhcat/conf")
 
   # if stack version supports hive serve interactive
   if check_stack_feature(StackFeature.HIVE_SERVER_INTERACTIVE, stack_version_formatted_major):
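
The two hunks above pin the hcatalog/webhcat configuration directories to fixed /etc locations for ODPi packaging instead of deriving them from {stack_root}/current, which is why the commit drops the /usr/hdp-style paths. A small sketch (illustrative only, not part of the patch) of how one might sanity-check those directories on a provisioned node; the directory list mirrors the values set above, and the helper name is made up for this example.

# Minimal sketch: verify the hardcoded ODPi config directories referenced by the
# patched params_linux.py / status_params.py exist on a node. Python 2 style,
# matching the stack scripts; everything beyond the two paths is illustrative.
import os

ODPI_HIVE_CONF_DIRS = [
    "/etc/hive-hcatalog/conf",  # hcat_conf_dir
    "/etc/hive-webhcat/conf",   # config_dir / webhcat_conf_dir
]

def missing_conf_dirs(dirs=ODPI_HIVE_CONF_DIRS):
    """Return the expected config directories that are absent."""
    return [d for d in dirs if not os.path.isdir(d)]

if __name__ == "__main__":
    missing = missing_conf_dirs()
    if missing:
        print("Missing ODPi Hive config dirs: %s" % ", ".join(missing))
    else:
        print("All ODPi Hive config dirs present.")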


[17/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
new file mode 100755
index 0000000..d6ecbed
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/metainfo.xml
@@ -0,0 +1,518 @@
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services><service>
+    <name>HIVE</name>
+    <displayName>Hive</displayName>
+    <version>1.2.1</version>
+    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+    <components>
+        <component>
+            <name>HCAT</name>
+            <displayName>HCat Client</displayName>
+            <category>CLIENT</category>
+            <deleted>false</deleted>
+            <cardinality>0+</cardinality>
+            <versionAdvertised>true</versionAdvertised>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/hcat_client.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>0</timeout>
+            </commandScript>
+            <configFiles>
+                <configFile>
+                    <type>env</type>
+                    <fileName>hcat-env.sh</fileName>
+                    <dictionaryName>hcat-env</dictionaryName>
+                </configFile>
+            </configFiles>
+            <configuration-dependencies>
+                <config-type>hive-site</config-type>
+            </configuration-dependencies>
+            <recovery_enabled>false</recovery_enabled>
+        </component>
+        <component>
+            <name>HIVE_SERVER</name>
+            <displayName>HiveServer2</displayName>
+            <category>MASTER</category>
+            <deleted>false</deleted>
+            <cardinality>1+</cardinality>
+            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/hive_server.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>0</timeout>
+            </commandScript>
+            <logs>
+                <log>
+                    <logId>hive_hiveserver2</logId>
+                    <primary>true</primary>
+                </log>
+            </logs>
+            <dependencies>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>TEZ/TEZ_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+                    <scope>cluster</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>YARN/YARN_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+            </dependencies>
+            <configuration-dependencies>
+                <config-type>hiveserver2-site</config-type>
+                <config-type>hive-site</config-type>
+            </configuration-dependencies>
+            <recovery_enabled>false</recovery_enabled>
+            <reassignAllowed>true</reassignAllowed>
+        </component>
+        <component>
+            <name>HIVE_CLIENT</name>
+            <displayName>Hive Client</displayName>
+            <category>CLIENT</category>
+            <deleted>false</deleted>
+            <cardinality>1+</cardinality>
+            <versionAdvertised>true</versionAdvertised>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/hive_client.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>0</timeout>
+            </commandScript>
+            <configFiles>
+                <configFile>
+                    <type>xml</type>
+                    <fileName>hive-site.xml</fileName>
+                    <dictionaryName>hive-site</dictionaryName>
+                </configFile>
+                <configFile>
+                    <type>env</type>
+                    <fileName>hive-env.sh</fileName>
+                    <dictionaryName>hive-env</dictionaryName>
+                </configFile>
+                <configFile>
+                    <type>env</type>
+                    <fileName>hive-log4j.properties</fileName>
+                    <dictionaryName>hive-log4j</dictionaryName>
+                </configFile>
+                <configFile>
+                    <type>env</type>
+                    <fileName>hive-exec-log4j.properties</fileName>
+                    <dictionaryName>hive-exec-log4j</dictionaryName>
+                </configFile>
+            </configFiles>
+            <configuration-dependencies>
+                <config-type>hive-site</config-type>
+            </configuration-dependencies>
+            <recovery_enabled>false</recovery_enabled>
+        </component>
+        <component>
+            <name>WEBHCAT_SERVER</name>
+            <displayName>WebHCat Server</displayName>
+            <category>MASTER</category>
+            <deleted>false</deleted>
+            <cardinality>1+</cardinality>
+            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/webhcat_server.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>1200</timeout>
+            </commandScript>
+            <clientsToUpdateConfigs>
+                <client>HCAT</client>
+            </clientsToUpdateConfigs>
+            <dependencies>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>HDFS/HDFS_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>HIVE/HIVE_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+                        <coLocate>HIVE/WEBHCAT_SERVER</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+                        <coLocate>HIVE/WEBHCAT_SERVER</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+                    <scope>cluster</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>YARN/YARN_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>PIG/PIG</name>
+                    <scope>host</scope>
+                </dependency>
+            </dependencies>
+            <configuration-dependencies>
+                <config-type>hive-site</config-type>
+            </configuration-dependencies>
+            <recovery_enabled>false</recovery_enabled>
+            <reassignAllowed>true</reassignAllowed>
+        </component>
+        <component>
+            <name>MYSQL_SERVER</name>
+            <displayName>MySQL Server</displayName>
+            <category>MASTER</category>
+            <deleted>false</deleted>
+            <cardinality>0-1</cardinality>
+            <versionAdvertised>false</versionAdvertised>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/mysql_server.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>0</timeout>
+            </commandScript>
+            <customCommands>
+                <customCommand>
+                    <name>CLEAN</name>
+                    <commandScript>
+                        <script>scripts/mysql_server.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                    <background>false</background>
+                </customCommand>
+            </customCommands>
+            <recovery_enabled>false</recovery_enabled>
+            <reassignAllowed>true</reassignAllowed>
+        </component>
+        <component>
+            <name>HIVE_METASTORE</name>
+            <displayName>Hive Metastore</displayName>
+            <category>MASTER</category>
+            <deleted>false</deleted>
+            <cardinality>1+</cardinality>
+            <versionAdvertisedInternal>true</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/hive_metastore.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>1200</timeout>
+            </commandScript>
+            <logs>
+                <log>
+                    <logId>hive_metastore</logId>
+                    <primary>true</primary>
+                </log>
+            </logs>
+            <configuration-dependencies>
+                <config-type>hive-site</config-type>
+            </configuration-dependencies>
+            <auto-deploy>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+                <coLocate>HIVE/HIVE_SERVER</coLocate>
+                <enabled>true</enabled>
+            </auto-deploy>
+            <recovery_enabled>false</recovery_enabled>
+            <reassignAllowed>true</reassignAllowed>
+        </component>
+        <component>
+            <name>HIVE_SERVER_INTERACTIVE</name>
+            <displayName>HiveServer2 Interactive</displayName>
+            <category>MASTER</category>
+            <deleted>false</deleted>
+            <cardinality>0-1</cardinality>
+            <versionAdvertised>true</versionAdvertised>
+            <versionAdvertisedInternal>false</versionAdvertisedInternal>
+            <commandScript>
+                <script>scripts/hive_server_interactive.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>0</timeout>
+            </commandScript>
+            <customCommands>
+                <customCommand>
+                    <name>RESTART_LLAP</name>
+                    <commandScript>
+                        <script>scripts/hive_server_interactive.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                    <background>false</background>
+                </customCommand>
+            </customCommands>
+            <dependencies>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+                    <scope>cluster</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>YARN/YARN_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>HDFS/HDFS_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>TEZ/TEZ_CLIENT</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>PIG/PIG</name>
+                    <scope>host</scope>
+                </dependency>
+                <dependency>
+                    <auto-deploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </auto-deploy>
+                    <autoDeploy>
+                        <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+                        <coLocate>HIVE/HIVE_SERVER_INTERACTIVE</coLocate>
+                        <enabled>true</enabled>
+                    </autoDeploy>
+                    <name>SLIDER/SLIDER</name>
+                    <scope>host</scope>
+                </dependency>
+            </dependencies>
+            <configuration-dependencies>
+                <config-type>beeline-log4j2</config-type>
+                <config-type>hive-exec-log4j2</config-type>
+                <config-type>hive-log4j2</config-type>
+                <config-type>hive-site</config-type>
+                <config-type>hive-interactive-site</config-type>
+                <config-type>tez-interactive-site</config-type>
+                <config-type>hiveserver2-interactive-site</config-type>
+                <config-type>hive-interactive-env</config-type>
+                <config-type>llap-cli-log4j2</config-type>
+                <config-type>llap-daemon-log4j</config-type>
+            </configuration-dependencies>
+            <recovery_enabled>false</recovery_enabled>
+        </component>
+    </components>
+    <deleted>false</deleted>
+    <configuration-dependencies>
+        <config-type>application-properties</config-type>
+        <config-type>hive-atlas-application.properties</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>tez-site</config-type>
+        <config-type>hive-env</config-type>
+        <config-type>hivemetastore-site.xml</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
+        <config-type>ranger-hive-plugin-properties</config-type>
+        <config-type>ranger-hive-audit</config-type>
+        <config-type>ranger-hive-policymgr-ssl</config-type>
+        <config-type>ranger-hive-security</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>application.properties</config-type>
+    </configuration-dependencies>
+    <widgetsFileName>widgets.json</widgetsFileName>
+    <metricsFileName>metrics.json</metricsFileName>
+    <osSpecifics>
+        <osSpecific>
+            <osFamily>any</osFamily>
+            <packages>
+                <package>
+                    <name>mysql-connector-java</name>
+                    <condition>should_install_mysl_connector</condition>
+                    <skipUpgrade>true</skipUpgrade>
+                </package>
+            </packages>
+        </osSpecific>
+        <osSpecific>
+            <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+            <packages>
+                <package>
+                    <name>hive</name>
+                    <skipUpgrade>false</skipUpgrade>
+                </package>
+                <package>
+                    <name>hive-hcatalog</name>
+                    <skipUpgrade>false</skipUpgrade>
+                </package>
+                <package>
+                    <name>hive-webhcat</name>
+                    <skipUpgrade>false</skipUpgrade>
+                </package>
+            </packages>
+        </osSpecific>
+        <osSpecific>
+            <osFamily>amazon2015,redhat6,suse11,suse12</osFamily>
+            <packages>
+                <package>
+                    <name>mysql</name>
+                    <condition>should_install_mysql</condition>
+                    <skipUpgrade>true</skipUpgrade>
+                </package>
+            </packages>
+        </osSpecific>
+        <osSpecific>
+            <osFamily>amazon2015,redhat6,debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+            <packages>
+                <package>
+                    <name>mysql-server</name>
+                    <condition>should_install_mysql</condition>
+                    <skipUpgrade>true</skipUpgrade>
+                </package>
+            </packages>
+        </osSpecific>
+    </osSpecifics>
+    <configuration-dir>configuration</configuration-dir>
+    <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+    </commandScript>
+    <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+        <service>YARN</service>
+    </requiredServices>
+</service></services>
+</metainfo>
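
For a quick sanity check of a service definition like the one above, the component names, categories and cardinalities can be pulled out with a few lines of ElementTree. This is an illustrative sketch, not part of the commit, and it assumes the file has been saved locally as metainfo.xml:

    import xml.etree.ElementTree as ET

    def summarize_metainfo(path="metainfo.xml"):
        # Walk every <component> element and print name, category, cardinality.
        root = ET.parse(path).getroot()
        for component in root.iter("component"):
            print("{0:<25} {1:<8} {2}".format(
                component.findtext("name"),
                component.findtext("category"),
                component.findtext("cardinality")))

    if __name__ == "__main__":
        summarize_metainfo()

Running it against the HIVE metainfo should list HCAT, HIVE_SERVER, HIVE_CLIENT, WEBHCAT_SERVER, MYSQL_SERVER, HIVE_METASTORE and HIVE_SERVER_INTERACTIVE with the cardinalities shown above.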

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
new file mode 100755
index 0000000..6917160
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_interactive_thrift_port.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import hive_check
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.port}}'
+HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY = '{{hive-interactive-site/hive.server2.thrift.http.port}}'
+HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY = '{{hive-interactive-site/hive.server2.authentication}}'
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
+HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
+HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10500
+HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT = 'binary'
+HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
+HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
+          HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY, HIVE_SERVER2_AUTHENTICATION_KEY,
+          HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY, SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY,
+          HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY, HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY,
+          KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL, HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  pass
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_INTERACTIVE_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_INTERACTIVE_THRIFT_HTTP_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+  hive_server2_authentication = HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_DEFAULT
+  if HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY in configurations:
+    hive_server2_authentication = configurations[HIVE_SERVER2_INTERACTIVE_AUTHENTICATION_KEY]
+  elif HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
+    hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
+
+  hive_ssl = False
+  if HIVE_SSL in configurations:
+    hive_ssl = configurations[HIVE_SSL]
+
+  hive_ssl_keystore_path = None
+  if HIVE_SSL_KEYSTORE_PATH in configurations:
+    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
+
+  hive_ssl_keystore_password = None
+  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
+    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  if security_enabled:
+    hive_server_principal = HIVE_SERVER_INTERACTIVE_PRINCIPAL_DEFAULT
+    if HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY in configurations:
+      hive_server_principal = configurations[HIVE_SERVER_INTERACTIVE_PRINCIPAL_KEY]
+
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+      kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+  else:
+    hive_server_principal = None
+    kinitcmd=None
+
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    start_time = time.time()
+
+    try:
+      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
+                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
+                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
+                                        check_command_timeout=int(check_command_timeout))
+      result_code = 'OK'
+      total_time = time.time() - start_time
+      label = OK_MESSAGE.format(total_time, port)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def execute(configurations={}, parameters={}, host_name=None):
+  pass
\ No newline at end of file
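
The alert above resolves the port to probe from hive-interactive-site according to the configured transport mode, falling back to the 10500 default. A standalone sketch of that selection logic (the {{site/property}} keys are copied from the script; the helper name and example call are illustrative only):

    def resolve_thrift_port(configurations,
                            port_key='{{hive-interactive-site/hive.server2.thrift.port}}',
                            http_port_key='{{hive-interactive-site/hive.server2.thrift.http.port}}',
                            mode_key='{{hive-site/hive.server2.transport.mode}}',
                            default_port=10500):
        # Binary transport uses the thrift port, HTTP transport the thrift.http port;
        # anything missing falls back to the default.
        mode = configurations.get(mode_key, 'binary').lower()
        if mode == 'binary' and port_key in configurations:
            return int(configurations[port_key])
        if mode == 'http' and http_port_key in configurations:
            return int(configurations[http_port_key])
        return default_port

    # Example: binary transport with no explicit port falls back to 10500.
    print(resolve_thrift_port({'{{hive-site/hive.server2.transport.mode}}': 'binary'}))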

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
new file mode 100755
index 0000000..e02ed5a
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_metastore.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import traceback
+import logging
+
+from resource_management.core import global_lock
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+
+OK_MESSAGE = "Metastore OK - Hive command took {0:.3f}s"
+CRITICAL_MESSAGE = "Metastore on {0} failed ({1})"
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_METASTORE_URIS_KEY = '{{hive-site/hive.metastore.uris}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+STACK_ROOT = '{{cluster-env/stack_root}}'
+
+HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
+
+HIVE_BIN_DIR_LEGACY = '/usr/lib/hive/bin'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
+    HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+    STACK_ROOT)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_METASTORE_URIS_KEY, HADOOPUSER_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  if not HIVE_METASTORE_URIS_KEY in configurations:
+    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))
+
+  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  try:
+    if security_enabled:
+      if SMOKEUSER_KEYTAB_KEY in configurations:
+        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+      # Get the configured Kerberos executable search paths, if any
+      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+      else:
+        kerberos_executable_search_paths = None
+
+      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+
+      # prevent concurrent kinit
+      kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+      kinit_lock.acquire()
+      try:
+        Execute(kinitcmd, user=smokeuser,
+          path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
+          timeout=10)
+      finally:
+        kinit_lock.release()
+
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    for uri in metastore_uris:
+      if host_name in uri:
+        metastore_uri = uri
+
+    conf_dir = HIVE_CONF_DIR_LEGACY
+    bin_dir = HIVE_BIN_DIR_LEGACY
+
+
+    if STACK_ROOT in configurations:
+      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf/conf.server")
+      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+
+      if os.path.exists(hive_conf_dir):
+        conf_dir = hive_conf_dir
+        bin_dir = hive_bin_dir
+
+    cmd = format("export HIVE_CONF_DIR='{conf_dir}' ; "
+                 "hive --hiveconf hive.metastore.uris={metastore_uri}\
+                 --hiveconf hive.metastore.client.connect.retry.delay=1\
+                 --hiveconf hive.metastore.failure.retries=1\
+                 --hiveconf hive.metastore.connect.retries=1\
+                 --hiveconf hive.metastore.client.socket.timeout=14\
+                 --hiveconf hive.execution.engine=mr -e 'show databases;'")
+
+    start_time = time.time()
+
+    try:
+      Execute(cmd, user=smokeuser,
+        path=["/bin/", "/usr/bin/", "/usr/sbin/", bin_dir],
+        timeout=int(check_command_timeout) )
+
+      total_time = time.time() - start_time
+
+      result_code = 'OK'
+      label = OK_MESSAGE.format(total_time)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  from resource_management.libraries.functions import reload_windows_env
+  reload_windows_env()
+  hive_home = os.environ['HIVE_HOME']
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+  if not HIVE_METASTORE_URIS_KEY in configurations:
+    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))
+
+  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')
+
+  # defaults
+  hiveuser = HADOOPUSER_DEFAULT
+
+  if HADOOPUSER_KEY in configurations:
+    hiveuser = configurations[HADOOPUSER_KEY]
+
+  result_code = None
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+    for uri in metastore_uris:
+      if host_name in uri:
+        metastore_uri = uri
+
+    hive_cmd = os.path.join(hive_home, "bin", "hive.cmd")
+    cmd = format("cmd /c {hive_cmd} --hiveconf hive.metastore.uris={metastore_uri}\
+                 --hiveconf hive.metastore.client.connect.retry.delay=1\
+                 --hiveconf hive.metastore.failure.retries=1\
+                 --hiveconf hive.metastore.connect.retries=1\
+                 --hiveconf hive.metastore.client.socket.timeout=14\
+                 --hiveconf hive.execution.engine=mr -e 'show databases;'")
+    start_time = time.time()
+    try:
+      Execute(cmd, user=hiveuser, timeout=30)
+      total_time = time.time() - start_time
+      result_code = 'OK'
+      label = OK_MESSAGE.format(total_time)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, traceback.format_exc())
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
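
The metastore alert above iterates over the comma-separated hive.metastore.uris value, keeps the entry that mentions the local host, and then shells out to hive -e 'show databases;' with aggressive retry and timeout settings. A standalone sketch of that URI selection (illustrative; unlike the packaged script it also falls back to the first URI when no entry matches the host, so the result is always defined):

    import socket

    def pick_local_metastore_uri(uris_value, host_name=None):
        host_name = host_name or socket.getfqdn()
        uris = [u.strip() for u in uris_value.split(',') if u.strip()]
        for uri in uris:
            if host_name in uri:
                return uri
        # Fallback added for illustration; the packaged script assumes a match.
        return uris[0] if uris else None

    print(pick_local_metastore_uri(
        "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083",
        host_name="ms2.example.com"))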

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
new file mode 100755
index 0000000..32da1cc
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import hive_check
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
+HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{hive-site/hive.server2.thrift.http.port}}'
+HIVE_SERVER_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
+HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
+HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10000
+HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
+HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
+HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
+          HIVE_SERVER2_AUTHENTICATION_KEY, HIVE_SERVER_PRINCIPAL_KEY,
+          SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
+          HIVE_SERVER_TRANSPORT_MODE_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL,
+          HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
+          HIVE_SERVER_TRANSPORT_MODE_KEY, HADOOPUSER_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+  if CHECK_COMMAND_TIMEOUT_KEY in parameters:
+    check_command_timeout = float(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+  hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
+  if HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
+    hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
+
+  hive_ssl = False
+  if HIVE_SSL in configurations:
+    hive_ssl = configurations[HIVE_SSL]
+
+  hive_ssl_keystore_path = None
+  if HIVE_SSL_KEYSTORE_PATH in configurations:
+    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
+
+  hive_ssl_keystore_password = None
+  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
+    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  if security_enabled:
+    hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
+    if HIVE_SERVER_PRINCIPAL_KEY in configurations:
+      hive_server_principal = configurations[HIVE_SERVER_PRINCIPAL_KEY]
+
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+      kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+  else:
+    hive_server_principal = None
+    kinitcmd=None
+
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    start_time = time.time()
+
+    try:
+      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
+                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
+                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password,
+                                        check_command_timeout=int(check_command_timeout))
+      result_code = 'OK'
+      total_time = time.time() - start_time
+      label = OK_MESSAGE.format(total_time, port)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  from resource_management.libraries.functions import reload_windows_env
+  from resource_management.core.resources import Execute
+  reload_windows_env()
+  hive_home = os.environ['HIVE_HOME']
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
+
+  hiveuser = HADOOPUSER_DEFAULT
+  if HADOOPUSER_KEY in configurations:
+    hiveuser = configurations[HADOOPUSER_KEY]
+
+  result_code = None
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
+    # append url according to used transport
+    if transport_mode == "http":
+      beeline_url.append('httpPath=cliservice')
+    beeline_url_string = format(";".join(beeline_url))
+    beeline_cmd = os.path.join(hive_home, "bin", "beeline.cmd")
+    cmd = format("cmd /c {beeline_cmd} -u {beeline_url_string} -e '' 2>&1 | findstr Connected")
+
+    start_time = time.time()
+    try:
+      Execute(cmd, user=hiveuser, timeout=30)
+      total_time = time.time() - start_time
+      result_code = 'OK'
+      label = OK_MESSAGE.format(total_time, port)
+    except:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+  except:
+    label = traceback.format_exc()
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
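
On Windows the alert above checks HiveServer2 by driving beeline.cmd against a JDBC URL assembled from the host, port and transport mode, appending httpPath=cliservice when HTTP transport is in use. A standalone sketch of that URL assembly (the helper name and example host/port are illustrative, not part of the packaged script):

    def build_beeline_url(host_name, port, transport_mode='binary'):
        # Base URL plus transportMode; HTTP transport also needs the httpPath suffix.
        parts = ['jdbc:hive2://{0}:{1}/'.format(host_name, port),
                 'transportMode={0}'.format(transport_mode)]
        if transport_mode == 'http':
            parts.append('httpPath=cliservice')
        return ';'.join(parts)

    # Example: HTTP transport yields ...;transportMode=http;httpPath=cliservice
    print(build_beeline_url('hs2.example.com', 10001, transport_mode='http'))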

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
new file mode 100755
index 0000000..095be3f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_llap_app_status.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import logging
+import traceback
+import json
+import subprocess
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.core import shell
+from resource_management.core.resources import Execute
+from resource_management.core import global_lock
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.script.script import Script
+
+OK_MESSAGE = "The application reported a '{0}' state in {1:.3f}s"
+MESSAGE_WITH_STATE_AND_INSTANCES = "The application reported a '{0}' state in {1:.3f}s. [Live: {2}, Desired: {3}]"
+CRITICAL_MESSAGE_WITH_STATE = "The application reported a '{0}' state. Check took {1:.3f}s"
+CRITICAL_MESSAGE = "Application information could not be retrieved"
+
+# results codes
+CRITICAL_RESULT_CODE = 'CRITICAL'
+OK_RESULT_CODE = 'OK'
+UKNOWN_STATUS_CODE = 'UNKNOWN'
+
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+
+HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.zk.sm.principal}}'
+HIVE_PRINCIPAL_DEFAULT = 'default.hive.principal'
+
+HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.zk.sm.keytab.file}}'
+HIVE_PRINCIPAL_KEYTAB_DEFAULT = 'default.hive.keytab'
+
+HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
+
+HIVE_USER_KEY = '{{hive-env/hive_user}}'
+HIVE_USER_DEFAULT = 'default.smoke.user'
+
+STACK_ROOT = '{{cluster-env/stack_root}}'
+STACK_ROOT_DEFAULT = Script.get_stack_root()
+
+LLAP_APP_NAME_KEY = '{{hive-interactive-env/llap_app_name}}'
+LLAP_APP_NAME_DEFAULT = 'llap0'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 120.0
+
+
+# Mapping of LLAP app states to 'user friendly' state names.
+llap_app_state_dict = {'RUNNING_ALL': 'RUNNING',
+                       'RUNNING_PARTIAL': 'RUNNING',
+                       'COMPLETE': 'NOT RUNNING',
+                       'LAUNCHING': 'LAUNCHING',
+                       'APP_NOT_FOUND': 'APP NOT FOUND'}
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
+          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  LLAP_APP_STATUS_CMD_TIMEOUT = 0
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  result_code = None
+
+  try:
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+      security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
+    if CHECK_COMMAND_TIMEOUT_KEY in configurations:
+      check_command_timeout = int(parameters[CHECK_COMMAND_TIMEOUT_KEY])
+
+    hive_user = HIVE_USER_DEFAULT
+    if HIVE_USER_KEY in configurations:
+      hive_user = configurations[HIVE_USER_KEY]
+
+    llap_app_name = LLAP_APP_NAME_DEFAULT
+    if LLAP_APP_NAME_KEY in configurations:
+      llap_app_name = configurations[LLAP_APP_NAME_KEY]
+
+    if security_enabled:
+      if HIVE_PRINCIPAL_KEY in configurations:
+        llap_principal = configurations[HIVE_PRINCIPAL_KEY]
+      else:
+        llap_principal = HIVE_PRINCIPAL_DEFAULT
+      llap_principal = llap_principal.replace('_HOST',host_name.lower())
+
+      llap_keytab = HIVE_PRINCIPAL_KEYTAB_DEFAULT
+      if HIVE_PRINCIPAL_KEYTAB_KEY in configurations:
+        llap_keytab = configurations[HIVE_PRINCIPAL_KEYTAB_KEY]
+
+      # Get the configured Kerberos executable search paths, if any
+      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+      else:
+        kerberos_executable_search_paths = None
+
+      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+      kinitcmd=format("{kinit_path_local} -kt {llap_keytab} {llap_principal}; ")
+
+      # prevent concurrent kinit
+      kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+      kinit_lock.acquire()
+      try:
+        Execute(kinitcmd, user=hive_user,
+                path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
+                timeout=10)
+      finally:
+        kinit_lock.release()
+
+
+
+    start_time = time.time()
+    if STACK_ROOT in configurations:
+      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    else:
+      llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+
+    code, output, error = shell.checked_call(llap_status_cmd, user=hive_user, stderr=subprocess.PIPE,
+                                             timeout=check_command_timeout,
+                                             logoutput=False)
+    # Call for getting JSON
+    llap_app_info = make_valid_json(output)
+
+    if llap_app_info is None or 'state' not in llap_app_info:
+      alert_label = traceback.format_exc()
+      result_code = UNKNOWN_STATUS_CODE
+      return (result_code, [alert_label])
+
+    retrieved_llap_app_state = llap_app_info['state'].upper()
+    if retrieved_llap_app_state in ['RUNNING_ALL']:
+      result_code = OK_RESULT_CODE
+      total_time = time.time() - start_time
+      alert_label = OK_MESSAGE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
+    elif retrieved_llap_app_state in ['RUNNING_PARTIAL']:
+      live_instances = 0
+      desired_instances = 0
+      percentInstancesUp = 0
+      percent_desired_instances_to_be_up = 80
+      # Get 'live' and 'desired' instances
+      if 'liveInstances' not in llap_app_info or 'desiredInstances' not in llap_app_info:
+        result_code = CRITICAL_RESULT_CODE
+        total_time = time.time() - start_time
+        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
+        return (result_code, [alert_label])
+
+      live_instances = llap_app_info['liveInstances']
+      desired_instances = llap_app_info['desiredInstances']
+      if live_instances < 0 or desired_instances <= 0:
+        result_code = CRITICAL_RESULT_CODE
+        total_time = time.time() - start_time
+        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
+        return (result_code, [alert_label])
+
+      percentInstancesUp = float(live_instances) / desired_instances * 100
+      if percentInstancesUp >= percent_desired_instances_to_be_up:
+        result_code = OK_RESULT_CODE
+        total_time = time.time() - start_time
+        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
+                                                              total_time,
+                                                              llap_app_info['liveInstances'],
+                                                              llap_app_info['desiredInstances'])
+      else:
+        result_code = CRITICAL_RESULT_CODE
+        total_time = time.time() - start_time
+        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
+                                                              total_time,
+                                                              llap_app_info['liveInstances'],
+                                                              llap_app_info['desiredInstances'])
+    else:
+      result_code = CRITICAL_RESULT_CODE
+      total_time = time.time() - start_time
+      alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
+  except:
+    alert_label = traceback.format_exc()
+    result_code = UNKNOWN_STATUS_CODE
+  return (result_code, [alert_label])
+
+
+"""
+Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in
+to JSON converter.
+"""
+def make_valid_json(output):
+  '''
+
+  Note: It is currently assumed that extra lines appear only at the start of the output, not at the end.
+
+  Sample expected JSON to be passed for 'loads' is either of the form :
+
+  Case 'A':
+  {
+      "amInfo" : {
+      "appName" : "llap0",
+      "appType" : "org-apache-slider",
+      "appId" : "APP1",
+      "containerId" : "container_1466036628595_0010_01_000001",
+      "hostname" : "hostName",
+      "amWebUrl" : "http://hostName:port/"
+    },
+    "state" : "LAUNCHING",
+    ....
+    "desiredInstances" : 1,
+    "liveInstances" : 0,
+    ....
+    ....
+  }
+
+  or
+
+  Case 'B':
+  {
+    "state" : "APP_NOT_FOUND"
+  }
+
+  '''
+  splits = output.split("\n")
+
+  len_splits = len(splits)
+  if len_splits < 3:
+    raise Fail("Malformed JSON data received from 'llapstatus' command. Exiting ....")
+
+  marker_idx = None  # To detect where from to start reading for JSON data
+  for idx, split in enumerate(splits):
+    curr_elem = split.strip()
+    if idx + 2 > len_splits:
+      raise Fail(
+        "Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
+    next_elem = (splits[(idx + 1)]).strip()
+    if curr_elem == "{":
+      if next_elem == "\"amInfo\" : {" and (splits[len_splits - 1]).strip() == '}':
+        # For Case 'A'
+        marker_idx = idx
+        break
+      elif idx + 3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
+        # For Case 'B'
+        marker_idx = idx
+        break
+
+
+  # Remove extra logging from possible JSON output
+  if marker_idx is None:
+    raise Fail("Couldn't validate the received output for JSON parsing.")
+  else:
+    if marker_idx != 0:
+      del splits[0:marker_idx]
+
+  scanned_output = '\n'.join(splits)
+  llap_app_info = json.loads(scanned_output)
+  return llap_app_info
\ No newline at end of file
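
For reference, a minimal usage sketch of make_valid_json() (not part of the patch itself); the banner lines and
the resulting dictionary below are illustrative assumptions about what a MOTD-polluted 'llapstatus' output could
look like:

  # Hypothetical 'llapstatus' output: two banner lines followed by the
  # Case 'B' JSON payload described in the docstring above.
  sample_output = "\n".join([
      "Welcome to the cluster",             # extra logging, gets stripped
      "LLAP status invoked by smoke user",  # extra logging, gets stripped
      "{",
      '  "state" : "APP_NOT_FOUND"',
      "}",
  ])

  llap_app_info = make_valid_json(sample_output)
  # llap_app_info == {'state': 'APP_NOT_FOUND'}, which execute() above maps to a CRITICAL result.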

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
new file mode 100755
index 0000000..c9575c0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/alerts/alert_webhcat_server.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same set of functions.
+import socket
+import time
+import urllib2
+import traceback
+import logging
+
+from resource_management.core.environment import Environment
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+
+
+RESULT_CODE_OK = "OK"
+RESULT_CODE_CRITICAL = "CRITICAL"
+RESULT_CODE_UNKNOWN = "UNKNOWN"
+
+OK_MESSAGE = "WebHCat status was OK ({0:.3f}s response from {1})"
+CRITICAL_CONNECTION_MESSAGE = "Connection failed to {0}\n{1}"
+CRITICAL_HTTP_MESSAGE = "HTTP {0} response from {1} \n{2}"
+CRITICAL_WEBHCAT_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
+CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE = "Unable to determine WebHCat health from unexpected JSON response"
+
+TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+WEBHCAT_PRINCIPAL_KEY = '{{webhcat-site/templeton.kerberos.principal}}'
+WEBHCAT_KEYTAB_KEY = '{{webhcat-site/templeton.kerberos.keytab}}'
+
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+WEBHCAT_OK_RESPONSE = 'ok'
+WEBHCAT_PORT_DEFAULT = 50111
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+CURL_CONNECTION_TIMEOUT_DEFAULT = str(int(CONNECTION_TIMEOUT_DEFAULT))
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_DEFAULT = 'ambari-qa'
+logger = logging.getLogger('ambari_alerts')
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (TEMPLETON_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
+          KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SMOKEUSER_KEY)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  result_code = RESULT_CODE_UNKNOWN
+
+  if configurations is None:
+    return (result_code, ['There were no configurations supplied to the script.'])
+
+  webhcat_port = WEBHCAT_PORT_DEFAULT
+  if TEMPLETON_PORT_KEY in configurations:
+    webhcat_port = int(configurations[TEMPLETON_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+    curl_connection_timeout = str(int(connection_timeout))
+
+
+  # the alert will always run on the webhcat host
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  smokeuser = SMOKEUSER_DEFAULT
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  # webhcat always uses http, never SSL
+  query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)
+
+  # initialize
+  total_time = 0
+  json_response = {}
+
+  if security_enabled:
+    try:
+      # defaults
+      smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+      smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+
+      # check script params
+      if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+        smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+      if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+        smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+      # check configurations last as they should always take precedence
+      if SMOKEUSER_PRINCIPAL_KEY in configurations:
+        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+      if SMOKEUSER_KEYTAB_KEY in configurations:
+        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+      # Get the configured Kerberos executable search paths, if any
+      kerberos_executable_search_paths = None
+      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+
+      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
+      env = Environment.get_instance()
+      stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
+        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
+        "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      # check the response code
+      response_code = int(stdout)
+
+      # 0 indicates no connection
+      if response_code == 0:
+        label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
+        return (RESULT_CODE_CRITICAL, [label])
+
+      # any other response aside from 200 is a problem
+      if response_code != 200:
+        label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url, traceback.format_exc())
+        return (RESULT_CODE_CRITICAL, [label])
+
+      # now that we have the http status and it was 200, get the content
+      stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
+        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
+        False, "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms = kinit_timer_ms)
+
+      json_response = json.loads(stdout)
+    except:
+      return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
+  else:
+    url_response = None
+
+    try:
+      # execute the query for the JSON that includes WebHCat status
+      start_time = time.time()
+      url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
+      total_time = time.time() - start_time
+
+      json_response = json.loads(url_response.read())
+    except urllib2.HTTPError as httpError:
+      label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url, traceback.format_exc())
+      return (RESULT_CODE_CRITICAL, [label])
+    except:
+      label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
+      return (RESULT_CODE_CRITICAL, [label])
+    finally:
+      if url_response is not None:
+        try:
+          url_response.close()
+        except:
+          pass
+
+
+  # if status is not in the response, we can't do any check; return CRIT
+  if 'status' not in json_response:
+    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + str(json_response)])
+
+
+  # URL response received, parse it
+  try:
+    webhcat_status = json_response['status']
+  except:
+    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + "\n" + traceback.format_exc()])
+
+
+  # proper JSON received, compare against known value
+  if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE.format(total_time, query_url)
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)
+
+  return (result_code, [label])
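
As a rough sketch of how the {{site/property}} tokens from get_tokens() come back to execute() as a
configurations dictionary (the host name, port and timeout below are placeholder values, not taken from the
patch):

  # Hypothetical invocation of the alert outside of Ambari, non-kerberized case.
  configurations = {
      '{{webhcat-site/templeton.port}}': '50111',
      '{{cluster-env/security_enabled}}': 'false',
      '{{cluster-env/smokeuser}}': 'ambari-qa',
  }
  parameters = {'connection.timeout': '5.0'}

  result_code, labels = execute(configurations=configurations,
                                parameters=parameters,
                                host_name='webhcat.example.com')
  # result_code is 'OK' when /templeton/v1/status returns {"status": "ok"},
  # otherwise 'CRITICAL' with a pre-formatted label in labels[0].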


[21/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
new file mode 100755
index 0000000..75be7f2
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hivemetastore-site.xml
@@ -0,0 +1,43 @@
+<configuration><property require-input="false">
+    <name>hive.metastore.metrics.enabled</name>
+    <value>true</value>
+    <filename>hivemetastore-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.reporter</name>
+    <value>JSON_FILE, JMX, HADOOP2</value>
+    <filename>hivemetastore-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.hadoop2.component</name>
+    <value>hivemetastore</value>
+    <filename>hivemetastore-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.file.location</name>
+    <value>/var/log/hive/hivemetastore-report.json</value>
+    <filename>hivemetastore-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
new file mode 100755
index 0000000..6954e56
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-interactive-site.xml
@@ -0,0 +1,56 @@
+<configuration><property require-input="false">
+    <name>hive.metastore.metrics.enabled</name>
+    <value>true</value>
+    <filename>hiveserver2-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.reporter</name>
+    <value>JSON_FILE, JMX, HADOOP2</value>
+    <filename>hiveserver2-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.hadoop2.component</name>
+    <value>hiveserver2</value>
+    <filename>hiveserver2-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.file.location</name>
+    <value>/var/log/hive/hiveserver2Interactive-report.json</value>
+    <filename>hiveserver2-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.async.log.enabled</name>
+    <value>false</value>
+    <description>Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give a significant performance improvement because logging is handled in a separate thread that uses the LMAX Disruptor queue for buffering log messages. Refer to https://logging.apache.org/log4j/2.x/manual/async.html for benefits and drawbacks.</description>
+    <filename>hiveserver2-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
new file mode 100755
index 0000000..e78f176
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hiveserver2-site.xml
@@ -0,0 +1,122 @@
+<configuration><property require-input="false">
+    <name>hive.metastore.metrics.enabled</name>
+    <value>true</value>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.reporter</name>
+    <value>JSON_FILE, JMX, HADOOP2</value>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.hadoop2.component</name>
+    <value>hiveserver2</value>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.service.metrics.file.location</name>
+    <value>/var/log/hive/hiveserver2-report.json</value>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the Hive client authorization</description>
+    <display-name>Enable Authorization</display-name>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>value-list</type>
+        <entries>
+            <entry>
+                <value>true</value>
+                <label>True</label>
+            </entry>
+            <entry>
+                <value>false</value>
+                <label>False</label>
+            </entry>
+        </entries>
+        <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>hive.conf.restricted.list</name>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    <description></description>
+    <filename>hiveserver2-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>hive_security_authorization</name>
+            <type>hive-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
new file mode 100755
index 0000000..b7f6523
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-cli-log4j2.xml
@@ -0,0 +1,91 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = WARN
+name = LlapCliLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = console
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = llap-cli.log
+
+# list of all appenders
+appenders = console, DRFA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %p %c{2}: %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingRandomAccessFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI sessions
+appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf
+
+logger.ZooKeeper.name = org.apache.zookeeper
+logger.ZooKeeper.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HadoopConf.name = org.apache.hadoop.conf.Configuration
+logger.HadoopConf.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root, DRFA
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+rootLogger.appenderRef.DRFA.ref = DRFA
+  </value>
+    <description>Custom llap-cli-log4j2.properties</description>
+    <display-name>llap-cli-log4j2 template</display-name>
+    <filename>llap-cli-log4j2.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
new file mode 100755
index 0000000..30c31be
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/llap-daemon-log4j.xml
@@ -0,0 +1,158 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This is the log4j2 properties file used by llap-daemons. There are several loggers defined, which
+# can be selected while configuring LLAP.
+# Based on the one selected, UI links etc. need to be adjusted in the system.
+# Note: Some names and logic are common to this file and the LLAP LogHelpers. Make sure to change
+# those as well if changing this file.
+
+status = INFO
+name = LlapDaemonLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.llap.daemon.log.level = INFO
+property.llap.daemon.root.logger = console
+property.llap.daemon.log.dir = .
+property.llap.daemon.log.file = llapdaemon.log
+property.llap.daemon.historylog.file = llapdaemon_history.log
+property.llap.daemon.log.maxfilesize = 256MB
+property.llap.daemon.log.maxbackupindex = 240
+
+# list of all appenders
+appenders = console, RFA, HISTORYAPPENDER, query-routing
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n
+
+# rolling file appender
+appender.RFA.type = RollingRandomAccessFile
+appender.RFA.name = RFA
+appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
+appender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%d{yyyy-MM-dd-HH}_%i.done
+appender.RFA.layout.type = PatternLayout
+appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t (%X{fragmentId})] %c: %m%n
+appender.RFA.policies.type = Policies
+appender.RFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.RFA.policies.time.interval = 1
+appender.RFA.policies.time.modulate = true
+appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.RFA.strategy.type = DefaultRolloverStrategy
+appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# history file appender
+appender.HISTORYAPPENDER.type = RollingRandomAccessFile
+appender.HISTORYAPPENDER.name = HISTORYAPPENDER
+appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
+appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd}_%i.done
+appender.HISTORYAPPENDER.layout.type = PatternLayout
+appender.HISTORYAPPENDER.layout.pattern = %m%n
+appender.HISTORYAPPENDER.policies.type = Policies
+appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.HISTORYAPPENDER.policies.time.type = TimeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.time.interval = 1
+appender.HISTORYAPPENDER.policies.time.modulate = true
+appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
+appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# queryId based routing file appender
+appender.query-routing.type = Routing
+appender.query-routing.name = query-routing
+appender.query-routing.routes.type = Routes
+appender.query-routing.routes.pattern = $${ctx:queryId}
+# Purge policy for query-based Routing Appender
+appender.query-routing.purgePolicy.type = LlapRoutingAppenderPurgePolicy
+# Note: Do not change this name without changing the corresponding entry in LlapConstants
+appender.query-routing.purgePolicy.name = llapLogPurgerQueryRouting
+# default route
+appender.query-routing.routes.route-default.type = Route
+appender.query-routing.routes.route-default.key = $${ctx:queryId}
+appender.query-routing.routes.route-default.ref = RFA
+# queryId based route
+appender.query-routing.routes.route-mdc.type = Route
+appender.query-routing.routes.route-mdc.file-mdc.type = LlapWrappedAppender
+appender.query-routing.routes.route-mdc.file-mdc.name = IrrelevantName-query-routing
+appender.query-routing.routes.route-mdc.file-mdc.app.type = RandomAccessFile
+appender.query-routing.routes.route-mdc.file-mdc.app.name = file-mdc
+appender.query-routing.routes.route-mdc.file-mdc.app.fileName = ${sys:llap.daemon.log.dir}/${ctx:queryId}-${ctx:dagId}.log
+appender.query-routing.routes.route-mdc.file-mdc.app.layout.type = PatternLayout
+appender.query-routing.routes.route-mdc.file-mdc.app.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking
+
+logger.LlapIoImpl.name = LlapIoImpl
+logger.LlapIoImpl.level = INFO
+
+logger.LlapIoOrc.name = LlapIoOrc
+logger.LlapIoOrc.level = WARN
+
+logger.LlapIoCache.name = LlapIoCache
+logger.LlapIoCache.level = WARN
+
+logger.LlapIoLocking.name = LlapIoLocking
+logger.LlapIoLocking.level = WARN
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
+logger.HistoryLogger.level = INFO
+logger.HistoryLogger.additivity = false
+logger.HistoryLogger.appenderRefs = HistoryAppender
+logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
+
+# root logger
+rootLogger.level = ${sys:llap.daemon.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
+  </value>
+    <description>Custom llap-daemon-log4j2.properties</description>
+    <display-name>llap-daemon-log4j template</display-name>
+    <filename>llap-daemon-log4j.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
new file mode 100755
index 0000000..9e74aa0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-audit.xml
@@ -0,0 +1,136 @@
+<configuration><property require-input="false">
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <description>Is Audit to HDFS enabled?</description>
+    <display-name>Audit to HDFS</display-name>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>xasecure.audit.destination.hdfs</name>
+            <type>ranger-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>xasecure.audit.destination.hdfs.dir</name>
+            <type>ranger-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hive/audit/hdfs/spool</value>
+    <description>/var/log/hive/audit/hdfs/spool</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <description>Is Solr audit enabled?</description>
+    <display-name>Audit to SOLR</display-name>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>xasecure.audit.destination.solr</name>
+            <type>ranger-env</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value></value>
+    <description>Solr URL</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>ranger.audit.solr.urls</name>
+            <type>ranger-admin-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>ranger.audit.solr.zookeepers</name>
+            <type>ranger-admin-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hive/audit/solr/spool</value>
+    <description>/var/log/hive/audit/solr/spool</description>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <description>Enable Summary audit?</description>
+    <display-name>Audit provider summary enabled</display-name>
+    <filename>ranger-hive-audit.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
new file mode 100755
index 0000000..346baa9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -0,0 +1,63 @@
+<configuration><property require-input="false">
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <display-name>Policy user for HIVE</display-name>
+    <filename>ranger-hive-plugin-properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>jdbc.driverClassName</name>
+    <value>org.apache.hive.jdbc.HiveDriver</value>
+    <description>Used for repository creation on ranger admin</description>
+    <filename>ranger-hive-plugin-properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>common.name.for.certificate</name>
+    <value></value>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <filename>ranger-hive-plugin-properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hive</value>
+    <description>Used for repository creation on ranger admin</description>
+    <display-name>Ranger repository config user</display-name>
+    <filename>ranger-hive-plugin-properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hive</value>
+    <description>Used for repository creation on ranger admin</description>
+    <display-name>Ranger repository config password</display-name>
+    <filename>ranger-hive-plugin-properties.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+        <type>password</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
new file mode 100755
index 0000000..bf73d9f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
@@ -0,0 +1,71 @@
+<configuration><property require-input="false">
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <description>password for keystore</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+        <type>password</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <description>java truststore password</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+        <type>password</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <filename>ranger-hive-policymgr-ssl.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
new file mode 100755
index 0000000..a29780f
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/ranger-hive-security.xml
@@ -0,0 +1,81 @@
+<configuration><property require-input="false">
+    <name>ranger.plugin.hive.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ranger.plugin.hive.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this HIVE instance</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ranger.plugin.hive.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ranger.plugin.hive.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ranger.plugin.hive.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for changes in policies?</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>ranger.plugin.hive.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>xasecure.hive.update.xapolicies.on.grant.revoke</name>
+    <value>true</value>
+    <description>Should Hive plugin update Ranger policies for updates to permissions done using GRANT/REVOKE?</description>
+    <display-name>Should Hive GRANT/REVOKE update XA policies</display-name>
+    <filename>ranger-hive-security.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>boolean</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
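
The Ranger plugin properties above (policy source class, REST URL, poll interval, policy cache dir) use Jinja-style {{...}} placeholders such as {{repo_name}} and {{policymgr_mgr_url}}, which Ambari resolves from cluster parameters when it writes the file. A minimal sketch of that substitution, assuming example values for the two parameters (the render() helper is only illustrative, not part of the commit):

# Sketch: resolve {{key}} placeholders the way the rendered config would look.
import re

def render(template, params):
    """Replace {{key}} markers with values from params, leaving unknown keys intact."""
    return re.sub(r"\{\{\s*(\w+)\s*\}\}",
                  lambda m: str(params.get(m.group(1), m.group(0))),
                  template)

params = {
    "repo_name": "cl1_hive",                         # assumed example Ranger repo name
    "policymgr_mgr_url": "http://ranger-admin:6080", # assumed example Ranger Admin URL
}

print(render("/etc/ranger/{{repo_name}}/policycache", params))  # /etc/ranger/cl1_hive/policycache
print(render("{{policymgr_mgr_url}}", params))                  # http://ranger-admin:6080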

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
new file mode 100755
index 0000000..3865c36
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/tez-interactive-site.xml
@@ -0,0 +1,144 @@
+<configuration><property require-input="false">
+    <name>tez.runtime.shuffle.fetch.buffer.percent</name>
+    <value>0.6</value>
+    <description>Fraction (0-1) of the available memory which can be used to
+      retain shuffled data</description>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.runtime.shuffle.memory.limit.percent</name>
+    <value>0.25</value>
+    <description>This property determines the maximum size of a shuffle segment
+      which can be fetched to memory. Fraction (0-1) of shuffle memory
+      (after applying tez.runtime.shuffle.fetch.buffer.percent)</description>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.runtime.report.partition.stats</name>
+    <value>true</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.runtime.pipelined-shuffle.enabled</name>
+    <value>false</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.runtime.pipelined.sorter.lazy-allocate.memory</name>
+    <value>true</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.grouping.node.local.only</name>
+    <value>true</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.runtime.shuffle.fetch.verify-disk-checksum</name>
+    <value>false</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.lib.uris</name>
+    <value>/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz</value>
+    <description>Comma-delimited list of the location of the Tez libraries which will be localized for DAGs.
+      Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
+      If multiple files are specified, they are localized as regular files and the contents of directories are localized as regular files (non-recursive).
+    </description>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.dag.recovery.enabled</name>
+    <value>false</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.am.resource.memory.mb</name>
+    <value>1536</value>
+    <description>The amount of memory to be used by the AppMaster</description>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>int</type>
+    </value-attributes>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+        <property>
+            <name>hive.llap.daemon.queue.name</name>
+            <type>hive-interactive-site</type>
+        </property>
+        <property>
+            <name>llap_queue_capacity</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>enable_hive_interactive</name>
+            <type>hive-interactive-env</type>
+        </property>
+        <property>
+            <name>hive.server2.tez.sessions.per.default.queue</name>
+            <type>hive-interactive-site</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>tez.session.am.dag.submit.timeout.secs</name>
+    <value>3600</value>
+    <filename>tez-interactive-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
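
The two shuffle fractions above compose: tez.runtime.shuffle.fetch.buffer.percent bounds the memory available to retain shuffled data, and tez.runtime.shuffle.memory.limit.percent caps the size of a single shuffle segment fetched into that memory. A rough arithmetic sketch, assuming a 4096 MB task (the task size is an assumed example; the semantics follow the property descriptions):

# Sketch: how the two Tez shuffle fractions combine for one task.
task_memory_mb = 4096          # assumed task/container memory budget
fetch_buffer_percent = 0.6     # tez.runtime.shuffle.fetch.buffer.percent
segment_limit_percent = 0.25   # tez.runtime.shuffle.memory.limit.percent

shuffle_buffer_mb = task_memory_mb * fetch_buffer_percent
max_in_memory_segment_mb = shuffle_buffer_mb * segment_limit_percent

print(shuffle_buffer_mb)         # 2457.6 MB usable to retain shuffled data
print(max_in_memory_segment_mb)  # 614.4 MB: larger segments are fetched to disk instead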

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
new file mode 100755
index 0000000..fa62c78
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-env.xml
@@ -0,0 +1,38 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# The file containing the running pid
+PID_FILE={{webhcat_pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+    </value>
+    <description>webhcat-env.sh content</description>
+    <display-name>webhcat-env template</display-name>
+    <filename>webhcat-env.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
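
The exported HADOOP_HOME line in the template above uses shell default expansion: an already-set HADOOP_HOME wins, otherwise the Ambari-rendered {{hadoop_home}} value is used. The same defaulting, sketched in Python with an assumed fallback path:

# Sketch: equivalent of export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
import os

rendered_hadoop_home = "/usr/lib/hadoop"  # assumed example value for {{hadoop_home}}
hadoop_home = os.environ.get("HADOOP_HOME", rendered_hadoop_home)
print(hadoop_home)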

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
new file mode 100755
index 0000000..541b1c9
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-log4j.xml
@@ -0,0 +1,63 @@
+<configuration><property require-input="false">
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Define some default values that can be overridden by system properties
+webhcat.root.logger = INFO, standard
+webhcat.log.dir = .
+webhcat.log.file = webhcat.log
+
+log4j.rootLogger = ${webhcat.root.logger}
+
+# Logging Threshold
+log4j.threshhold = DEBUG
+
+log4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender
+log4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern = .yyyy-MM-dd
+
+log4j.appender.DRFA.layout = org.apache.log4j.PatternLayout
+
+log4j.appender.standard.layout = org.apache.log4j.PatternLayout
+log4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n
+
+# Class logging settings
+log4j.logger.com.sun.jersey = DEBUG
+log4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR
+log4j.logger.org.apache.hadoop = INFO
+log4j.logger.org.apache.hadoop.conf = WARN
+log4j.logger.org.apache.zookeeper = WARN
+log4j.logger.org.eclipse.jetty = INFO
+
+    </value>
+    <description>Custom webhcat-log4j.properties</description>
+    <display-name>webhcat-log4j template</display-name>
+    <filename>webhcat-log4j.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>content</type>
+        <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
new file mode 100755
index 0000000..680ddc8
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/webhcat-site.xml
@@ -0,0 +1,287 @@
+<configuration><property require-input="false">
+    <name>templeton.libjars</name>
+    <value>/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar</value>
+    <description>Jars to add to the classpath.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hive.extra.files</name>
+    <value>/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib</value>
+    <description>The resources in this list will be localized to the node running LaunchMapper and added to HADOOP_CLASSPATH
+      before launching the 'hive' command.  If the path /foo/bar is a directory, the contents of the entire dir will be localized
+      and ./foo/* will be added to HADOOP_CLASSPATH.  Note that since classpath processing does not recurse into subdirectories,
+      the paths in this property may be overlapping.  In the example above, "./tez-site.xml:./tez-client/*:./lib/*" will be added to
+      HADOOP_CLASSPATH.
+      This can be used to specify config files, Tez artifacts, etc.  The list is passed via the -files option of the hadoop jar command, so
+      each path is interpreted by Hadoop's GenericOptionsParser.  Paths may be local or HDFS paths.
+    </description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.jar</name>
+    <value>/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar</value>
+    <description>The path to the Templeton jar file.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hadoop</name>
+    <value>/usr/hdp/${hdp.version}/hadoop/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz</value>
+    <description>The path to the Pig archive in HDFS.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hcat</name>
+    <value>/usr/hdp/${hdp.version}/hive/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.sqoop.archive</name>
+    <value>hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz</value>
+    <description>The path to the Sqoop archive in HDFS.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.sqoop.path</name>
+    <value>sqoop.tar.gz/sqoop/bin/sqoop</value>
+    <description>The path to the Sqoop executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.sqoop.home</name>
+    <value>sqoop.tar.gz/sqoop</value>
+    <description>The path to the Sqoop home within the tar. Has no effect if
+      templeton.sqoop.archive is not set.
+    </description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.python</name>
+    <value>${env.PYTHON_CMD}</value>
+    <description>The path to the Python executable.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9083,hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma-separated host:port pairs</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes>
+        <type>multiLine</type>
+    </value-attributes>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>Enable the override path in templeton.override.jars</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API</description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="true" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on/>
+    <property_depended_by/>
+</property><property require-input="false">
+    <name>templeton.hadoop.queue.name</name>
+    <value>default</value>
+    <description>
+      MapReduce queue name to which WebHCat map-only jobs are submitted. Can be used to avoid a deadlock where all map slots in the cluster are taken over by Templeton launcher tasks.
+    </description>
+    <filename>webhcat-site.xml</filename>
+    <deleted>false</deleted>
+    <on-ambari-upgrade add="false" delete="false" update="false"/>
+    <property-type></property-type>
+    <value-attributes/>
+    <depends-on>
+        <property>
+            <name>yarn.scheduler.capacity.root.queues</name>
+            <type>capacity-scheduler</type>
+        </property>
+    </depends-on>
+    <property_depended_by/>
+</property></configuration>
\ No newline at end of file
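
With the settings above, WebHCat listens on templeton.port (50111). A minimal liveness probe against the usual /templeton/v1/status endpoint, with an assumed host name (a sketch only, not part of the commit):

# Sketch: check that the WebHCat server configured above responds.
import json
import urllib.request

host = "webhcat-host.example.com"  # assumed example host
port = 50111                       # templeton.port

with urllib.request.urlopen(f"http://{host}:{port}/templeton/v1/status", timeout=10) as resp:
    body = json.load(resp)

print(body)  # a healthy server is expected to report {"status": "ok", ...}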


[28/52] bigtop git commit: Making ODPi Ambari stack compatible with ODPi packaging

Posted by rv...@apache.org.
Making ODPi Ambari stack compatible with ODPi packaging

(cherry picked from commit 40465c12191dc22beca5b72952ecf2721893ca0d)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/d7c2a9a3
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/d7c2a9a3
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/d7c2a9a3

Branch: refs/heads/master
Commit: d7c2a9a3d80604e2777a79a0bccf6f2bb485a686
Parents: aab5273
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Fri Oct 28 10:54:16 2016 -0700
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:09 2017 -0700

----------------------------------------------------------------------
 .../services/HIVE/configuration/hive-site.xml   |  2 +-
 .../HIVE/package/files/templetonSmoke.sh        | 37 --------------------
 2 files changed, 1 insertion(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/d7c2a9a3/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
index cddb624..c1f2a98 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/configuration/hive-site.xml
@@ -2167,7 +2167,7 @@
     <property_depended_by/>
 </property><property require-input="false">
     <name>hive.execution.engine</name>
-    <value>tez</value>
+    <value>mr</value>
     <description>
       Expects one of [mr, tez].
       Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/d7c2a9a3/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
index dd61631..0ab94fe 100755
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
@@ -55,41 +55,4 @@ if [[ "$httpExitCode" -ne "200" ]] ; then
   exit 1
 fi
 
-#try hcat ddl command
-/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/show_db.post.txt
-echo "user.name=${smoke_test_user}&exec=show databases;" > ${tmp_dir}/show_db.post.txt
-/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# NOT SURE?? SUHAS
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-
-#create, copy post args file
-/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/pig_post.txt
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > ${tmp_dir}/pig_post.txt
-/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/pig_post.txt
-
-#submit pig query
-cmd="curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
 exit 0
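
The removed block above submitted "show databases;" to the WebHCat /ddl endpoint as the smoke-test user and treated anything other than HTTP 200 as a failure. A rough Python equivalent of that check (Kerberos negotiation and the Ambari-derived variables are omitted; the host and user below are assumed examples):

# Sketch: the DDL smoke test removed from templetonSmoke.sh, re-expressed in Python.
import urllib.error
import urllib.parse
import urllib.request

ttonurl = "http://webhcat-host.example.com:50111/templeton/v1"  # assumed WebHCat base URL
smoke_test_user = "ambari-qa"                                   # assumed smoke-test user

data = urllib.parse.urlencode({"user.name": smoke_test_user,
                               "exec": "show databases;"}).encode()
req = urllib.request.Request(ttonurl + "/ddl", data=data)  # POST, like curl -d
try:
    with urllib.request.urlopen(req, timeout=30) as resp:
        ok = resp.getcode() == 200
except urllib.error.HTTPError as err:
    ok = False
    print("Templeton smoke test (ddl cmd) failed:", err.code)

print("PASS" if ok else "FAIL")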


[15/52] bigtop git commit: ODPI-193. Add Hive 1.2 to ODPi ambari reference implementation

Posted by rv...@apache.org.
http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
new file mode 100755
index 0000000..bc6486b
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1406 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
+
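
For context, a metastore dump like the one above would typically be loaded into an empty PostgreSQL database before Hive is started against it; the database name, role, and file name in the sketch below are illustrative assumptions, not values taken from this patch.

    # Minimal sketch, assuming local PostgreSQL client tools; database name, role
    # and file name are placeholders, not defined in this patch.
    createdb -U hiveuser hive_metastore
    psql -U hiveuser -d hive_metastore -f hive-schema-0.12.0.postgres.sql
    # The dump records its version in VERSION (0.12.0), so a quick sanity check is:
    psql -U hiveuser -d hive_metastore -c 'SELECT "SCHEMA_VERSION" FROM "VERSION";'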

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
new file mode 100755
index 0000000..862e9b2
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+userhost=$4
+
+# The restart (not start) is required to pick up mysql configuration changes made by sed
+# during install, in case mysql is already started. The changes are required by Hive later on.
+/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice restart
+
+# MySQL 5.7 installed in non-interactive way uses a socket authentication plugin.
+# "mysql -u root" should be executed from root user
+echo "Adding user $mysqldbuser@% and removing users with empty name"
+/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';"
+/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';"
+/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "DELETE FROM mysql.user WHERE user='';"
+/var/lib/ambari-agent/ambari-sudo.sh mysql -u root -e "flush privileges;"
+/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice stop
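
A minimal usage sketch for the script above; the service name and credentials are placeholders, not values from this patch. Note that the CREATE USER and GRANT statements always use '%', so the fourth argument is captured but not referenced.

    # Hypothetical invocation: <mysqld service> <db user to create> <db password> <allowed host>
    bash addMysqlUser.sh mysqld hive hive-secret '%'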

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
new file mode 100755
index 0000000..39e63a6
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+export purge_cmd=""
+if [ "$3" == "true" ]; then
+	export purge_cmd="purge"
+fi
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename} ${purge_cmd}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename} ${purge_cmd}"
+;;
+
+esac
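
A usage sketch, assuming the hcat client is on PATH and can reach a running metastore; the table name below is a placeholder.

    # Hypothetical invocation: <table name> <prepare|cleanup> <purge: true|false>
    bash hcatSmoke.sh hcatsmoke_example prepare true
    bash hcatSmoke.sh hcatsmoke_example cleanup true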

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
new file mode 100755
index 0000000..f9f2020
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
new file mode 100755
index 0000000..10d6a1c
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveTezSetup.cmd
@@ -0,0 +1,58 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements.  See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License.  You may obtain a copy of the License at
+rem
+rem     http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+if not defined HADOOP_HOME (
+  set EXITCODE=5
+  goto :errorexit
+)
+if not defined HIVE_HOME (
+  set EXITCODE=6
+  goto :errorexit
+)
+if not defined TEZ_HOME (
+  set EXITCODE=7
+  goto :errorexit
+)
+
+set EXITCODE=0
+
+if not exist %HIVE_HOME%\conf\hive-tez-configured (
+  %HADOOP_HOME%\bin\hadoop.cmd fs -mkdir /apps/tez
+  set EXITCODE=%ERRORLEVEL%
+  if %EXITCODE% neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -chmod -R 755 /apps/tez
+  set EXITCODE=%ERRORLEVEL%
+  if %EXITCODE% neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -chown -R hadoop:users /apps/tez
+  set EXITCODE=%ERRORLEVEL%
+  if %EXITCODE% neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -put %TEZ_HOME%\* /apps/tez
+  set EXITCODE=%ERRORLEVEL%
+  if %EXITCODE% neq 0 goto :errorexit
+
+  %HADOOP_HOME%\bin\hadoop.cmd fs -rm -r -skipTrash /apps/tez/conf
+  set EXITCODE=%ERRORLEVEL%
+  if %EXITCODE% neq 0 goto :errorexit
+
+  echo done > %HIVE_HOME%\conf\hive-tez-configured
+)
+goto :eof
+
+:errorexit
+exit /B %EXITCODE%

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
new file mode 100755
index 0000000..99a3865
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100755
index 0000000..77d7b3e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi
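
A usage sketch; the JDBC URL and script path are placeholders and assume a reachable HiveServer2. The second argument is the SQL file that beeline executes via !run (for example the hiveserver2.sql shipped alongside this script).

    # Hypothetical invocation: <HiveServer2 JDBC URL> <SQL file run via beeline's !run>
    bash hiveserver2Smoke.sh jdbc:hive2://example-host:10000 /tmp/hiveserver2.sql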

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
new file mode 100755
index 0000000..2e90ac0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
new file mode 100755
index 0000000..7b6d331
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/removeMysqlUser.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+userhost=$3
+myhostname=$(hostname -f)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+
+$sudo_prefix service $mysqldservice start
+echo "Removing user $mysqldbuser@$userhost"
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+$sudo_prefix service $mysqldservice stop
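
A usage sketch; the service name, user, and host are placeholders. The third argument must match the host the user was originally created for.

    # Hypothetical invocation: <mysqld service> <db user to drop> <user host>
    bash removeMysqlUser.sh mysqld hive '%'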

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
new file mode 100755
index 0000000..86541f0
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_BIN=${HIVE_BIN:-"hive"}
+
+HIVE_CONF_DIR=$4 $HIVE_BIN --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
+echo $! > $3
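
A usage sketch with placeholder paths; all five arguments are positional: stdout log, stderr log, pid file, Hive configuration directory, and Hive log directory.

    # Hypothetical invocation; every path below is a placeholder.
    bash startMetastore.sh /var/log/hive/hivemetastore.out \
        /var/log/hive/hivemetastore.err \
        /var/run/hive/hive.pid \
        /etc/hive/conf \
        /var/log/hive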

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
new file mode 100755
index 0000000..dd61631
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/files/templetonSmoke.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export templeton_port=$3
+export ttonTestScript=$4
+export smoke_user_keytab=$5
+export security_enabled=$6
+export kinit_path_local=$7
+export smokeuser_principal=$8
+export tmp_dir=$9
+export ttonurl="http://${ttonhost}:${templeton_port}/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+# try again for 2.3 username requirement
+if [[ "$httpExitCode" == "500" ]] ; then
+  cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status?user.name=$smoke_test_user 2>&1"
+  retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+  httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+fi
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+#try hcat ddl command
+/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/show_db.post.txt
+echo "user.name=${smoke_test_user}&exec=show databases;" > ${tmp_dir}/show_db.post.txt
+/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# Skip the Pig portion of the smoke test when security is enabled
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+
+#create, copy post args file
+/var/lib/ambari-agent/ambari-sudo.sh rm -f ${tmp_dir}/pig_post.txt
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > ${tmp_dir}/pig_post.txt
+/var/lib/ambari-agent/ambari-sudo.sh chown ${smoke_test_user} ${tmp_dir}/pig_post.txt
+
+#submit pig query
+cmd="curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  @${tmp_dir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
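
A usage sketch with security disabled; the host, port, and paths are placeholders. The keytab, kinit path, and principal arguments are still required positionally even though they are ignored when security_enabled is not "true".

    # Hypothetical invocation (security disabled); host, port and paths are placeholders.
    # Args: <webhcat host> <smoke user> <webhcat port> <pig script name under /tmp>
    #       <keytab> <security_enabled> <kinit path> <principal> <tmp dir>
    bash templetonSmoke.sh webhcat.example.com ambari-qa 50111 pigsmoke.pig \
        /dev/null false /usr/bin/kinit none /var/lib/ambari-agent/tmp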

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
new file mode 100755
index 0000000..5e2c709
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hcat():
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat():
+  import params
+
+  Directory(params.hive_conf_dir,
+            create_parents = True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
+  Directory(params.hcat_conf_dir,
+            create_parents = True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            create_parents = True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_client_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.hcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
+  )
+
+  # Generate atlas-application.properties.xml file
+  if has_atlas_in_cluster():
+    atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
new file mode 100755
index 0000000..b37698e
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from hcat import hcat
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script.script import Script
+
+
+class HCatClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hcat()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HCatClientWindows(HCatClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HCatClientDefault(HCatClient):
+  def get_component_name(self):
+    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    return "hive-webhcat"
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Execute <stack-selector-tool> before reconfiguring this client to the new stack version.
+
+    :param env:
+    :param upgrade_type:
+    :return:
+    """
+    Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the stack version does not support rolling upgrade
+    if not (params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version)):
+      return
+
+    # HCat client doesn't have a first-class entry in <stack-selector-tool>. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    stack_select.select("hive-webhcat", params.version)
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/ba8d7f50/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100755
index 0000000..07b4095
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/ODPi/1.0/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hcat_service_check():
+  import params
+  smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
+  service = "HCatalog"
+  Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    else:
+      kinit_cmd = ""
+
+    File(format("{tmp_dir}/hcatSmoke.sh"),
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare {purge_tables}")
+
+    exec_path = params.execute_path
+    if params.version and params.stack_root:
+      upgrade_hive_bin = format("{stack_root}/{version}/hive/bin")
+      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)
+
+    if params.security_enabled:
+      Execute (format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+               user = params.hdfs_user,
+      )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  bin_dir=params.execute_path
+    )
+
+    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup {purge_tables}")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)