Posted to commits@ambari.apache.org by jl...@apache.org on 2017/09/26 05:20:03 UTC

[12/50] [abbrv] ambari git commit: AMBARI-22011. Could not copy tez tarball to HDFS during prepare upgrade (EU) (ncole)

AMBARI-22011. Could not copy tez tarball to HDFS during prepare upgrade (EU) (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/001c77f5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/001c77f5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/001c77f5

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 001c77f5773e3ca778ee6ff78ba8c616f0e3007c
Parents: 3e5f60a
Author: Nate Cole <nc...@hortonworks.com>
Authored: Thu Sep 21 08:45:08 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Thu Sep 21 09:56:46 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/copy_tarball.py         | 103 ++++++++++++++-----
 .../libraries/functions/copy_tarball.py.rej     |  78 ++++++++++++++
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   2 +-
 3 files changed, 157 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
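
In short: each TARBALL_MAP entry changes from a bare (source, destination)
tuple to a dict that keeps those paths under a "dirs" key and adds a
"service" key naming the service that owns the tarball, and
get_current_version() gains an optional service parameter so the effective
version can be taken from the upgrade summary for that service. A minimal
sketch of how the new map shape is consumed (illustrative only, not part of
the commit):

    entry = TARBALL_MAP["tez"]
    (source_file, dest_file) = entry["dirs"]   # formerly the bare tuple value
    service = entry["service"]                 # "TEZ"; passed to get_current_version()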


http://git-wip-us.apache.org/repos/asf/ambari/blob/001c77f5/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 61b03ca..219430a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -41,26 +41,65 @@ STACK_VERSION_PATTERN = "{{ stack_version }}"
 # especially since it is an attribute of a stack and becomes
 # complicated to change during a Rolling/Express upgrade.
 TARBALL_MAP = {
-  "slider": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-             "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "tez": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-          "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "tez_hive2": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-          "/{0}/apps/{1}/tez_hive2/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "hive": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-           "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "pig": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-          "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "hadoop_streaming": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-                       "/{0}/apps/{1}/mapreduce/hadoop-streaming.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "sqoop": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
-            "/{0}/apps/{1}/sqoop/sqoop.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "mapreduce": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+  "slider": {
+    "dirs": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+              "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "SLIDER"
+  },
+
+  "tez": {
+    "dirs": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "TEZ"
+  },
+
+  "tez_hive2": {
+    "dirs": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/tez_hive2/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "HIVE"
+  },
+
+  "hive": {
+    "dirs": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+            "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "HIVE"
+  },
+
+  "pig": {
+    "dirs": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "PIG"
+  },
+
+  "hadoop_streaming": {
+    "dirs": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+                        "/{0}/apps/{1}/mapreduce/hadoop-streaming.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "MAPREDUCE2"
+  },
+
+  "sqoop": {
+    "dirs": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+             "/{0}/apps/{1}/sqoop/sqoop.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "SQOOP"
+  },
+
+  "mapreduce": {
+    "dirs": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                 "/{0}/apps/{1}/mapreduce/mapreduce.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "spark": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
-            "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-  "spark2": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN),
-             "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
+    "service": "MAPREDUCE2"
+  },
+
+  "spark": {
+    "dirs": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
+             "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "SPARK"
+  },
+
+  "spark2": {
+    "dirs": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN),
+             "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+    "service": "SPARK2"
+  }
 }
 
 SERVICE_TO_CONFIG_MAP = {
@@ -101,7 +140,13 @@ def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_so
     Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
     return (False, None, None)
 
-  stack_version = get_current_version(use_upgrading_version_during_upgrade)
+  if name is None or name.lower() not in TARBALL_MAP:
+    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
+    return (False, None, None)
+
+  service = TARBALL_MAP[name.lower()]['service']
+
+  stack_version = get_current_version(service=service, use_upgrading_version_during_upgrade=use_upgrading_version_during_upgrade)
   if not stack_version:
     Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(str(name)))
     return (False, None, None)
@@ -111,10 +156,7 @@ def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_so
     Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(str(name)))
     return (False, None, None)
 
-  if name is None or name.lower() not in TARBALL_MAP:
-    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
-    return (False, None, None)
-  (source_file, dest_file) = TARBALL_MAP[name.lower()]
+  (source_file, dest_file) = TARBALL_MAP[name.lower()]['dirs']
 
   if custom_source_file is not None:
     source_file = custom_source_file
@@ -134,14 +176,22 @@ def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_so
   return (True, source_file, dest_file)
 
 
-def get_current_version(use_upgrading_version_during_upgrade=True):
+def get_current_version(service=None, use_upgrading_version_during_upgrade=True):
   """
   Get the effective version to use when copying the tarballs.
+  :param service: the service name to use when checking for an upgrade; optional, so that \
+    external code that still calls this function without a service keeps working
   :param use_upgrading_version_during_upgrade: True, except when the RU/EU hasn't started yet.
   :return: Version, or False if an error occurred.
   """
+
+  from resource_management.libraries.functions import upgrade_summary
+
   # get the version for this command
   version = stack_features.get_stack_feature_version(Script.get_config())
+  if service is not None:
+    version = upgrade_summary.get_target_version(service_name=service, default_version=version)
+
 
   # if there is no upgrade, then use the command's version
   if not Script.in_stack_upgrade() or use_upgrading_version_during_upgrade:
@@ -152,6 +202,9 @@ def get_current_version(use_upgrading_version_during_upgrade=True):
 
   # we're in an upgrade and we need to use an older version
   current_version = stack_select.get_role_component_current_stack_version()
+  if service is not None:
+    current_version = upgrade_summary.get_source_version(service_name=service, default_version=current_version)
+
   if current_version is None:
     Logger.warning("Unable to determine the current version of the component for this command; unable to copy the tarball")
     return False
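
With the tarball-name validation moved ahead of the version lookup and the
owning service threaded through, a successful call now resolves roughly as
follows (a hedged sketch; the example paths assume an HDP stack rooted at
/usr/hdp and are not part of the commit):

    (found, source_file, dest_file) = get_tarball_paths("tez")
    # found       -> True only when "tez" is in TARBALL_MAP and the stack
    #                name, version, and root all resolve
    # source_file -> e.g. /usr/hdp/<effective version>/tez/lib/tez.tar.gz  (local)
    # dest_file   -> e.g. /hdp/apps/<effective version>/tez/tez.tar.gz     (HDFS)
    # where <effective version> is now the upgrade summary's target version
    # for TEZ (or its source version when
    # use_upgrading_version_during_upgrade=False during an upgrade)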

http://git-wip-us.apache.org/repos/asf/ambari/blob/001c77f5/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py.rej
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py.rej b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py.rej
new file mode 100644
index 0000000..b2c11fc
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py.rej
@@ -0,0 +1,78 @@
+diff a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py	(rejected hunks)
+@@ -41,26 +41,65 @@ STACK_VERSION_PATTERN = "{{ stack_version }}"
+ # especially since it is an attribute of a stack and becomes
+ # complicated to change during a Rolling/Express upgrade.
+ TARBALL_MAP = {
+-  "slider": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++  "slider": {
++    "dirs": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+              "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "tez": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "SLIDER"
++  },
++
++  "tez": {
++    "dirs": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "tez_hive2": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "TEZ"
++  },
++
++  "tez_hive2": {
++    "dirs": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/tez_hive2/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "hive": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "HIVE"
++  },
++
++  "hive": {
++    "dirs": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+            "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "pig": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "HIVE"
++  },
++
++  "pig": {
++    "dirs": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "hadoop_streaming": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "PIG"
++  },
++
++  "hadoop_streaming": {
++    "dirs": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+                        "/{0}/apps/{1}/mapreduce/hadoop-streaming.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "sqoop": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "MAPREDUCE2"
++  },
++
++  "sqoop": {
++    "dirs": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+             "/{0}/apps/{1}/sqoop/sqoop.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "mapreduce": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
++    "service": "SQOOP"
++  },
++
++  "mapreduce": {
++    "dirs": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+                 "/{0}/apps/{1}/mapreduce/mapreduce.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "spark": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
++    "service": "MAPREDUCE2"
++  },
++
++  "spark": {
++    "dirs": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
+             "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+-  "spark2": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN),
+-             "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
++    "service": "SPARK"
++  },
++
++  "spark2": {
++    "dirs": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN),
++             "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
++    "service": "SPARK2"
++  }
+ }
+ 
+ 
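
(A note on the file above: a .rej file holds the hunks that "git apply
--reject" or patch could not apply; this one duplicates the TARBALL_MAP hunk
of copy_tarball.py and was committed along with the change.)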

http://git-wip-us.apache.org/repos/asf/ambari/blob/001c77f5/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 0796ad1..64f9d54 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -424,7 +424,7 @@ def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=N
                  "and performing a Downgrade.")
     return
 
-  effective_version = get_current_version()
+  effective_version = get_current_version(service="ATLAS")
   atlas_hive_hook_dir = format("{stack_root}/{effective_version}/atlas/hook/hive/")
   if not os.path.exists(atlas_hive_hook_dir):
     Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
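
The oozie.py change applies the same per-service lookup to the Atlas hive
hook path: during an upgrade the directory must reflect the version ATLAS is
actually moving to (or from), not the version scoped to the command. A
hedged illustration (version strings invented for the example):

    get_current_version()                  # before: command-scoped version, e.g. "2.6.2.0-205"
    get_current_version(service="ATLAS")   # after: ATLAS's version from the
                                           # upgrade summary, falling back to
                                           # the same command-scoped default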