Posted to commits@ambari.apache.org by al...@apache.org on 2016/04/13 23:46:15 UTC

ambari git commit: AMBARI-15850. Add support for Rolling and Express Upgrade for Hive Server Interactive and LLAP (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/trunk c7ba19a33 -> f67855443


AMBARI-15850. Add support for Rolling and Express Upgrade for Hive Server Interactive and LLAP (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f6785544
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f6785544
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f6785544

Branch: refs/heads/trunk
Commit: f678554439a60bcf9a5b5894ccd276507a2c2f8d
Parents: c7ba19a
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Tue Apr 12 17:19:33 2016 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Apr 13 14:43:58 2016 -0700

----------------------------------------------------------------------
 .../libraries/functions/copy_tarball.py         | 85 ++++++++++----------
 .../package/scripts/hive_interactive.py         |  7 +-
 .../package/scripts/hive_server_interactive.py  | 83 ++++++++++++-------
 .../0.12.0.2.0/package/scripts/params_linux.py  |  2 +-
 .../0.4.0.2.1/package/scripts/pre_upgrade.py    |  2 +-
 .../main/resources/scripts/Ambaripreupload.py   |  6 ++
 .../HDP/2.0.6/configuration/cluster-env.xml     | 11 ---
 .../HDP/2.0.6/properties/tarball_map.json       | 42 ----------
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml | 27 ++++---
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     | 22 +++--
 10 files changed, 143 insertions(+), 144 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 2ba12b6..38e4fb4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -24,7 +24,6 @@ import os
 import uuid
 import tempfile
 import re
-import json
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
@@ -37,7 +36,12 @@ STACK_NAME_PATTERN = "{{ stack_name }}"
 STACK_ROOT_PATTERN = "{{ stack_root }}"
 STACK_VERSION_PATTERN = "{{ stack_version }}"
 
-_DEFAULT_TARBALL_MAP = {
+# TODO, in the future, each stack can define its own mapping of tarballs
+# inside the stack definition directory in some sort of xml file.
+# PLEASE DO NOT put this in cluster-env since it becomes much harder to change,
+# especially since it is an attribute of a stack and becomes
+# complicated to change during a Rolling/Express upgrade.
+TARBALL_MAP = {
   "slider": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
              "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
   "tez": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
@@ -60,54 +64,43 @@ _DEFAULT_TARBALL_MAP = {
             "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
 }
 
-def _get_tarball_map():
+
+def _get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_source_file=None, custom_dest_file=None):
   """
-  Get the stack-specific tarball source and destination mappings
-  :return: tarball_map
+  For a given tarball name, get the source and destination paths to use.
+  :param name: Tarball name
+  :param use_upgrading_version_during_upgrade:
+  :param custom_source_file: If specified, use this source path instead of the default one from the map.
+  :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
+  :return: A tuple of (success status, source path, destination path)
   """
-  tarball_map_config = default("/configurations/cluster-env/tarball_map", None)
-
-  tarball_map = _DEFAULT_TARBALL_MAP
-  if tarball_map_config:
-    tarball_map = json.loads(tarball_map_config)
-
-  return tarball_map
-
-def _get_tarball_paths(name, use_upgrading_version_during_uprade=True, custom_source_file=None, custom_dest_file=None):
-
   stack_name = Script.get_stack_name()
+
   if not stack_name:
-    Logger.error("Cannot copy {0} tarball to HDFS because stack name could be be determined.".format(
-            str(name)))
+    Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
     return (False, None, None)
-  stack_version = _get_current_version(use_upgrading_version_during_uprade)
+
+  stack_version = _get_current_version(use_upgrading_version_during_upgrade)
   if not stack_version:
-    Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(
-            str(name)))
+    Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(str(name)))
     return (False, None, None)
 
   stack_root = Script.get_stack_root()
   if not stack_root:
-    Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(
-          str(name)))
+    Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(str(name)))
     return (False, None, None)
 
-  tarball_map = _get_tarball_map()
-  if not tarball_map:
-    Logger.error("Cannot copy {0} tarball to HDFS because tarball map could not be determined.".format(
-            str(name), str(stack_name)))
-
-  if name is None or name.lower() not in tarball_map:
-    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(
-            str(name), str(stack_name)))
+  if name is None or name.lower() not in TARBALL_MAP:
+    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
     return (False, None, None)
-  (source_file, dest_file) = tarball_map[name.lower()]
+  (source_file, dest_file) = TARBALL_MAP[name.lower()]
 
   if custom_source_file is not None:
     source_file = custom_source_file
 
   if custom_dest_file is not None:
     dest_file = custom_dest_file
+
   source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
   dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())
 
@@ -119,13 +112,19 @@ def _get_tarball_paths(name, use_upgrading_version_during_uprade=True, custom_so
 
   return (True, source_file, dest_file)
 
-def _get_current_version(use_upgrading_version_during_uprade=True):
+
+def _get_current_version(use_upgrading_version_during_upgrade=True):
+  """
+  Get the effective version to use to copy the tarballs to.
+  :param use_upgrading_version_during_upgrade: True, except when the RU/EU hasn't started yet.
+  :return: Version, or False if an error occurred.
+  """
   upgrade_direction = default("/commandParams/upgrade_direction", None)
   is_stack_upgrade = upgrade_direction is not None
   current_version = default("/hostLevelParams/current_version", None)
   Logger.info("Default version is {0}".format(current_version))
   if is_stack_upgrade:
-    if use_upgrading_version_during_uprade:
+    if use_upgrading_version_during_upgrade:
       # This is the version going to. In the case of a downgrade, it is the lower version.
       current_version = default("/commandParams/version", None)
       Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
@@ -147,6 +146,7 @@ def _get_current_version(use_upgrading_version_during_uprade=True):
 
   return current_version
 
+
 def _get_single_version_from_stack_select():
   """
   Call "<stack-selector> versions" and return the version string if only one version is available.
@@ -187,8 +187,9 @@ def _get_single_version_from_stack_select():
 
   return stack_version
 
+
 def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None, force_execute=False,
-                 use_upgrading_version_during_uprade=True, replace_existing_files=False, host_sys_prepped=False):
+                 use_upgrading_version_during_upgrade=True, replace_existing_files=False, host_sys_prepped=False):
   """
   :param name: Tarball name, e.g., tez, hive, pig, sqoop.
   :param user_group: Group to own the directory.
@@ -197,29 +198,29 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   :param custom_source_file: Override the source file path
   :param custom_dest_file: Override the destination file path
   :param force_execute: If true, will execute the HDFS commands immediately, otherwise, will defer to the calling function.
-  :param use_upgrading_version_during_uprade: If true, will use the version going to during upgrade. Otherwise, use the CURRENT (source) version.
+  :param use_upgrading_version_during_upgrade: If true, will use the version going to during upgrade. Otherwise, use the CURRENT (source) version.
   :param host_sys_prepped: If true, tarballs will not be copied as the cluster deployment uses prepped VMs.
   :return: Will return True if successful, otherwise, False.
   """
   import params
+
   Logger.info("Called copy_to_hdfs tarball: {0}".format(name))
-  (success, source_file, dest_file) = _get_tarball_paths(
-          name, use_upgrading_version_during_uprade, custom_source_file, custom_dest_file)
+  (success, source_file, dest_file) = _get_tarball_paths(name, use_upgrading_version_during_upgrade,
+                                                         custom_source_file, custom_dest_file)
 
   if not success:
+    Logger.error("Could not copy tarball {0} due to a missing or incorrect parameter.".format(str(name)))
     return False
 
   if host_sys_prepped:
-    Logger.info("Skipping copying {0} to {1} for {2} as its a sys_prepped host.".format(
-            str(source_file), str(dest_file), str(name)))
+    Logger.warning("Skipping copying {0} to {1} for {2} as its a sys_prepped host.".format(str(source_file), str(dest_file), str(name)))
     return True
 
   Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, dest_file))
 
   if not os.path.exists(source_file):
-    Logger.warning("WARNING. Cannot copy {0} tarball because file does not exist: {1} . "
-                   "It is possible that this component is not installed on this host.".format(
-            str(name), str(source_file)))
+    Logger.error("WARNING. Cannot copy {0} tarball because file does not exist: {1} . "
+                   "It is possible that this component is not installed on this host.".format(str(name), str(source_file)))
     return False
 
   # Because CopyFromLocal does not guarantee synchronization, it's possible for two processes to first attempt to

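Note: with the tarball map now hardcoded, _get_tarball_paths() substitutes the stack root, name, and version placeholders at call time. Below is a minimal standalone sketch of that substitution, with hypothetical stack values; the real code reads them from Script and the command JSON.

  # Minimal sketch of the placeholder substitution done by _get_tarball_paths().
  # The stack values passed to resolve() are hypothetical examples.
  STACK_NAME_PATTERN = "{{ stack_name }}"
  STACK_ROOT_PATTERN = "{{ stack_root }}"
  STACK_VERSION_PATTERN = "{{ stack_version }}"

  TARBALL_MAP = {
    "tez_hive2": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                  "/{0}/apps/{1}/tez_hive2/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
  }

  def resolve(name, stack_name="hdp", stack_root="/usr/hdp", stack_version="2.5.0.0-1234"):
    source, dest = TARBALL_MAP[name.lower()]
    for pattern, value in ((STACK_NAME_PATTERN, stack_name.lower()),
                           (STACK_ROOT_PATTERN, stack_root),
                           (STACK_VERSION_PATTERN, stack_version)):
      source = source.replace(pattern, value)
      dest = dest.replace(pattern, value)
    return source, dest

  print(resolve("tez_hive2"))
  # ('/usr/hdp/2.5.0.0-1234/tez_hive2/lib/tez.tar.gz',
  #  '/hdp/apps/2.5.0.0-1234/tez_hive2/tez.tar.gz')
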
http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_interactive.py
index 188f199..9d05d37 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_interactive.py
@@ -56,7 +56,7 @@ def hive_interactive(name=None):
   import params
 
   # Copy Tarballs in HDFS.
-  copy_to_hdfs("tez_hive2",
+  resource_created = copy_to_hdfs("tez_hive2",
                params.user_group,
                params.hdfs_user,
                file_mode=params.tarballs_mode,
@@ -66,9 +66,10 @@ def hive_interactive(name=None):
                params.user_group,
                params.hdfs_user,
                file_mode=params.tarballs_mode,
-               host_sys_prepped=params.host_sys_prepped)
+               host_sys_prepped=params.host_sys_prepped) or resource_created
 
-  params.HdfsResource(None, action="execute")
+  if resource_created:
+    params.HdfsResource(None, action="execute")
 
   Directory(params.hive_interactive_etc_dir_prefix,
             mode=0755

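Note: each copy_to_hdfs() call only queues HdfsResource work and reports whether anything needs to be created; the single execute at the end is now skipped when neither tarball produced work. A simplified standalone illustration of that accumulation idiom follows; copy_one() and execute_pending() are hypothetical stand-ins, not Ambari APIs.

  def copy_one(name):
    # Stand-in for copy_to_hdfs(): pretend only tez_hive2 needs copying here.
    return name == "tez_hive2"

  def execute_pending():
    # Stand-in for params.HdfsResource(None, action="execute").
    print("Executing queued HDFS operations")

  resource_created = copy_one("tez_hive2")
  resource_created = copy_one("hive2") or resource_created

  if resource_created:
    execute_pending()
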
http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index d909f6d..9d011a0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -29,6 +29,14 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.core.resources.system import Execute
+
+# Imports needed for Rolling/Express Upgrade
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
@@ -44,6 +52,16 @@ from hive_server import HiveServerDefault
 
 
 class HiveServerInteractive(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerInteractiveDefault(HiveServerInteractive):
+
+    def get_stack_to_component(self):
+      import params
+      return {params.stack_name: "hive-server2-hive2"}
+
     def install(self, env):
       import params
       self.install_packages(env)
@@ -53,23 +71,30 @@ class HiveServerInteractive(Script):
       env.set_params(params)
       hive_interactive(name='hiveserver2')
 
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class HiveServerWindows(HiveServerInteractive):
-    def start(self, env):
-      pass
+    def pre_upgrade_restart(self, env, upgrade_type=None):
+      Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
+      import params
+      env.set_params(params)
 
-    def stop(self, env):
-      pass
+      if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+        stack_select.select("hive-server2-hive2", params.version)
+        conf_select.select(params.stack_name, "hive2", params.version)
 
-    def status(self, env):
-      pass
+        # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
+        resource_created = copy_to_hdfs(
+          "hive2",
+          params.user_group,
+          params.hdfs_user,
+          host_sys_prepped=params.host_sys_prepped)
 
+        resource_created = copy_to_hdfs(
+          "tez_hive2",
+          params.user_group,
+          params.hdfs_user,
+          host_sys_prepped=params.host_sys_prepped) or resource_created
 
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class HiveServerDefault(HiveServerInteractive):
-    def get_stack_to_component(self):
-      import params
-      return {params.stack_name: "hive-server2-hive2"}
+        if resource_created:
+          params.HdfsResource(None, action="execute")
 
     def start(self, env, upgrade_type=None):
       import params
@@ -86,14 +111,13 @@ class HiveServerDefault(HiveServerInteractive):
       # Start LLAP before Hive Server Interactive start.
       # TODO, why does LLAP have to be started before Hive Server Interactive???
       status = self._llap_start(env)
-      if status:
-        # TODO : test the workability of Ranger and Hive2 during upgrade
-        # setup_ranger_hive(upgrade_type=upgrade_type)
+      if not status:
+        raise Fail("Skipping start of Hive Server Interactive since could not start LLAP.")
+
+      # TODO : test the workability of Ranger and Hive2 during upgrade
+      # setup_ranger_hive(upgrade_type=upgrade_type)
+      hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
 
-        hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
-      else:
-        Logger.info("Skipping start of Hive Server Interactive due to LLAP start issue.")
-        raise Exception("Problem starting HiveServer2")
 
     def stop(self, env, upgrade_type=None):
       import params
@@ -119,10 +143,6 @@ class HiveServerDefault(HiveServerInteractive):
       check_process_status(pid_file)
       # TODO : Check the LLAP app status as well.
 
-    def pre_upgrade_restart(self, env, upgrade_type=None):
-      # TODO: Make sure, the tez_hive2 is upgraded, while writing the upgrade code.
-      pass
-
     def security_status(self, env):
       HiveServerDefault.security_status(env)
 
@@ -135,7 +155,7 @@ class HiveServerDefault(HiveServerInteractive):
       env.set_params(params)
 
       if params.security_enabled:
-        self.do_kinit();
+        self.do_kinit()
 
       self._llap_stop(env)
       self._llap_start(env)
@@ -162,7 +182,7 @@ class HiveServerDefault(HiveServerInteractive):
       if code == 0:
         Logger.info(format("Successfully removed slider app {SLIDER_APP_NAME}."))
       else:
-        message = format("Could not remove slider app {SLIDER_APP_NAME}.")
+        message = format("Could not remove slider app {SLIDER_APP_NAME}. Please retry this task.")
         if error is not None:
           message += " " + error
         raise Fail(message)
@@ -239,7 +259,7 @@ class HiveServerDefault(HiveServerInteractive):
     def setup_security(self):
       import params
 
-      self.do_kinit();
+      self.do_kinit()
 
       # Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
       slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
@@ -251,5 +271,12 @@ class HiveServerDefault(HiveServerInteractive):
       hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; ")
       Execute(hive_interactive_kinit_cmd, user=params.hive_user)
 
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HiveServerInteractiveWindows(HiveServerInteractive):
+
+  def status(self, env):
+    pass
+
 if __name__ == "__main__":
-    HiveServerInteractive().execute()
\ No newline at end of file
+  HiveServerInteractive().execute()
\ No newline at end of file

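Note: the new pre_upgrade_restart() follows the ordering other components use during a Rolling/Express Upgrade: point the component's binaries and configs at the target version, then re-copy the tarballs Hive Interactive needs, and only then execute the queued HDFS work. A rough, runnable outline with the Ambari library calls replaced by stand-ins (the version string is a hypothetical example):

  def select_stack(component, version):
    # Stand-in for stack_select.select(component, version).
    print("switch {0} binaries to {1}".format(component, version))

  def select_conf(stack_name, package, version):
    # Stand-in for conf_select.select(stack_name, package, version).
    print("switch {0}/{1} conf dir to {2}".format(stack_name, package, version))

  def copy_to_hdfs(name):
    # Stand-in for copy_tarball.copy_to_hdfs(); True means work was queued.
    print("queue HDFS copy of {0} tarball".format(name))
    return True

  version = "2.5.0.0-1234"  # hypothetical target version of the upgrade
  if version:
    select_stack("hive-server2-hive2", version)
    select_conf("HDP", "hive2", version)
    created = copy_to_hdfs("hive2")
    created = copy_to_hdfs("tez_hive2") or created
    if created:
      print("execute queued HDFS operations")
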
http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index f878141..03fd40c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -88,7 +88,7 @@ hive_interactive_bin = '/usr/lib/hive2/bin'
 hive_interactive_lib = '/usr/lib/hive2/lib/'
 hive_interactive_var_lib = '/var/lib/hive2'
 
-# These tar folders were used in previous stack versions
+# These tar folders were used in previous stack versions, e.g., HDP 2.1
 hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
 pig_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/pig.tar.gz')
 hive_tar_file = format('/usr/share/{stack_name_uppercase}-webhcat/hive.tar.gz')

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
index 9d5868b..04d8be1 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
@@ -47,7 +47,7 @@ class TezPreUpgrade(Script):
         "tez",
         params.user_group,
         params.hdfs_user,
-        use_upgrading_version_during_uprade=False,
+        use_upgrading_version_during_upgrade=False,
         host_sys_prepped=params.host_sys_prepped)
       if resource_created:
         params.HdfsResource(None, action="execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index aff33d1..941e762 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -336,9 +336,15 @@ with Environment() as env:
     )
 
   print "Copying tarballs..."
+  # TODO, these shouldn't hardcode the stack root or destination stack name.
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop/mapreduce.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/tez/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hive/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+
+  # These 2 are needed by Hive Interactive
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hive2/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/hive2/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/tez_hive2/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/tez_hive2/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 6be568b..4f70b5a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -151,17 +151,6 @@ gpgcheck=0</value>
             <empty-value-valid>true</empty-value-valid>
         </value-attributes>
     </property>
-    
-    <property>
-        <name>tarball_map</name>
-        <value></value>
-        <description>Tarball mappings</description>
-        <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-        <value-attributes>
-            <property-file-name>tarball_map.json</property-file-name>
-            <property-file-type>json</property-file-type>
-        </value-attributes>
-    </property>
 
     <property>
         <name>stack_tools</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/tarball_map.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/tarball_map.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/tarball_map.json
deleted file mode 100644
index b70af13..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/tarball_map.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-  "slider": [
-    "{{ stack_root }}/{{ stack_version }}/slider/lib/slider.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/slider/slider.tar.gz"
-  ],
-  "tez": [
-    "{{ stack_root }}/{{ stack_version }}/tez/lib/tez.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/tez/tez.tar.gz"
-  ],
-  "tez_hive2": [
-    "{{ stack_root }}/{{ stack_version }}/tez_hive2/lib/tez.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/tez_hive2/tez.tar.gz"
-  ],
-  "hive": [
-    "{{ stack_root }}/{{ stack_version }}/hive/hive.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/hive/hive.tar.gz"
-  ],
-  "hive2": [
-    "{{ stack_root }}/{{ stack_version }}/hive/hive.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/hive2/hive.tar.gz"
-  ],
-  "pig": [
-    "{{ stack_root }}/{{ stack_version }}/pig/pig.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/pig/pig.tar.gz"
-  ],
-  "hadoop_streaming": [
-    "{{ stack_root }}/{{ stack_version }}/hadoop-mapreduce/hadoop-streaming.jar",
-    "/{{ stack_name }}/apps/{{ stack_version }}/mapreduce/hadoop-streaming.jar"
-  ],
-  "sqoop": [
-    "{{ stack_root }}/{{ stack_version }}/sqoop/sqoop.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/sqoop/sqoop.tar.gz"
-  ],
-  "mapreduce": [
-    "{{ stack_root }}/{{ stack_version }}/hadoop/mapreduce.tar.gz",
-    "/{{ stack_name }}/apps/{{ stack_version }}/mapreduce/mapreduce.tar.gz"
-  ],
-  "spark": [
-    "{{ stack_root }}/{{ stack_version }}/spark/lib/spark-{{ stack_name }}-assembly.jar",
-    "/{{ stack_name }}/apps/{{ stack_version }}/spark/spark-{{ stack_name }}-assembly.jar"
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
index c05e33f..0e1b30c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml
@@ -117,6 +117,7 @@
       <service name="HIVE">
         <component>WEBHCAT_SERVER</component>
         <component>HIVE_SERVER</component>
+        <component>HIVE_SERVER_INTERACTIVE</component>
         <component>HIVE_METASTORE</component>
       </service>
 
@@ -435,6 +436,16 @@
       </priority>
     </group>
 
+    <!-- Slider must be upgraded before higher-level apps that need to run apps on Slider, such as Hive. -->
+    <group xsi:type="restart" name="SLIDER" title="Slider">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <parallel-scheduler/>
+      <service name="SLIDER">
+        <component>SLIDER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="HIVE_MASTERS" title="Hive Masters">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -446,6 +457,7 @@
       <service name="HIVE">
         <component>HIVE_METASTORE</component>
         <component>HIVE_SERVER</component>
+        <component>HIVE_SERVER_INTERACTIVE</component>
         <component>WEBHCAT_SERVER</component>
       </service>
     </group>
@@ -566,15 +578,6 @@
       </execute-stage>
     </group>
 
-    <group xsi:type="restart" name="SLIDER" title="Slider">
-      <service-check>false</service-check>
-      <skippable>true</skippable>
-      <parallel-scheduler/>
-      <service name="SLIDER">
-        <component>SLIDER</component>
-      </service>
-    </group>
-
     <group xsi:type="restart" name="FLUME" title="Flume">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -876,6 +879,12 @@
         </upgrade>
       </component>
 
+      <component name="HIVE_SERVER_INTERACTIVE">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+
       <component name="WEBHCAT_SERVER">
         <upgrade>
           <task xsi:type="restart-task"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f6785544/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
index 2acffda..7a6e302 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.5.xml
@@ -216,12 +216,21 @@
       </exclude>
     </group>
 
+    <!-- Slider must be upgraded before higher-level apps that need to run apps on Slider, such as Hive. -->
+    <group name="SLIDER" title="Slider">
+      <skippable>true</skippable>
+      <service name="SLIDER">
+        <component>SLIDER</component>
+      </service>
+    </group>
+
     <group name="HIVE" title="Hive">
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
       <service name="HIVE">
         <component>HIVE_METASTORE</component>
         <component>HIVE_SERVER</component>
+        <component>HIVE_SERVER_INTERACTIVE</component>
         <component>WEBHCAT_SERVER</component>
       </service>
     </group>
@@ -367,13 +376,6 @@
       </service>
     </group>
 
-    <group name="SLIDER" title="Slider">
-      <skippable>true</skippable>
-      <service name="SLIDER">
-        <component>SLIDER</component>
-      </service>
-    </group>
-
     <group name="FLUME" title="Flume">
       <skippable>true</skippable>
       <service name="FLUME">
@@ -699,6 +701,12 @@
         </upgrade>
       </component>
 
+      <component name="HIVE_SERVER_INTERACTIVE">
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+
       <component name="WEBHCAT_SERVER">
         <upgrade>
           <task xsi:type="restart-task" />