Posted to commits@ambari.apache.org by ao...@apache.org on 2015/12/09 10:35:35 UTC
[1/2] ambari git commit: AMBARI-14285. Modify Ambaripreupload.py to
upload tarballs lib dirs etc. during upgrade (aonishuk)
Repository: ambari
Updated Branches:
refs/heads/branch-2.2 6054d79b9 -> 74ca65a7a
refs/heads/trunk cfec7af6c -> 9f2a72aef
AMBARI-14285. Modify Ambaripreupload.py to upload tarballs lib dirs etc. during upgrade (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9f2a72ae
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9f2a72ae
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9f2a72ae
Branch: refs/heads/trunk
Commit: 9f2a72aefb8a33d59c0d0077794df3595427d348
Parents: cfec7af
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Dec 9 11:34:45 2015 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Dec 9 11:34:45 2015 +0200
----------------------------------------------------------------------
.../main/resources/scripts/Ambaripreupload.py | 362 ++++++++++---------
1 file changed, 191 insertions(+), 171 deletions(-)
----------------------------------------------------------------------
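In short, the patch makes Ambaripreupload.py version-aware: an explicit -v/--hdp-version option (plus an optional positional HDFS path prefix) is parsed up front, and the /usr/hdp/current/... paths are rebuilt against /usr/hdp/<hdp_version>/... instead. A minimal sketch of the new command-line handling, using the same option names as the diff below (the hdp-select fallback is omitted and the final print is only for illustration):

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("-v", "--hdp-version", dest="hdp_version", default="",
                      help="hdp-version used in path of tarballs")
    (options, args) = parser.parse_args()

    # Optional positional argument: prefix prepended to all HDFS destinations.
    hdfs_path_prefix = args[0] if args else ""

    # An empty --hdp-version makes the real script fall back to parsing
    # '/usr/bin/hdp-select status hadoop-mapreduce-historyserver'.
    hdp_version = options.hdp_version

    print("prefix=%r version=%r" % (hdfs_path_prefix, hdp_version))

Invoked as, for example, 'Ambaripreupload.py -v 2.3.4.0-3485 /some/prefix' (both values here are purely illustrative), the script derives every tarball source and Oozie path from that version rather than from the /usr/hdp/current symlinks.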
http://git-wip-us.apache.org/repos/asf/ambari/blob/9f2a72ae/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index fd72f24..021102e 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -19,6 +19,7 @@ limitations under the License.
"""
import os
import sys
+from optparse import OptionParser
os.environ["PATH"] += os.pathsep + "/var/lib/ambari-agent"
sys.path.append("/usr/lib/python2.6/site-packages")
@@ -54,177 +55,196 @@ E.g., 998.2.2.1.0-998
Please note that "-${build_number}" is optional.
"""
-def getPropertyValueFromConfigXMLFile(xmlfile, name, defaultValue=None):
- xmldoc = minidom.parse(xmlfile)
- propNodes = [node.parentNode for node in xmldoc.getElementsByTagName("name") if node.childNodes[0].nodeValue == name]
- if len(propNodes) > 0:
- for node in propNodes[-1].childNodes:
- if node.nodeName == "value":
- if len(node.childNodes) > 0:
- return node.childNodes[0].nodeValue
- else:
- return defaultValue
- return defaultValue
-
-def get_fs_root(fsdefaultName=None):
- fsdefaultName = "fake"
+with Environment() as env:
+ def get_hdp_version():
+ if not options.hdp_version:
+ # Ubuntu returns: "stdin: is not a tty", as subprocess output.
+ tmpfile = tempfile.NamedTemporaryFile()
+ out = None
+ with open(tmpfile.name, 'r+') as file:
+ get_hdp_version_cmd = '/usr/bin/hdp-select status %s > %s' % ('hadoop-mapreduce-historyserver', tmpfile.name)
+ code, stdoutdata = shell.call(get_hdp_version_cmd)
+ out = file.read()
+ pass
+ if code != 0 or out is None:
+ Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
+ (get_hdp_version_cmd, str(code), str(out)))
+ return 1
+
+ matches = re.findall(r"([\d\.]+\-\d+)", out)
+ hdp_version = matches[0] if matches and len(matches) > 0 else None
+
+ if not hdp_version:
+ Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
+ return 1
+ else:
+ hdp_version = options.hdp_version
+
+ return hdp_version
+
+ parser = OptionParser()
+ parser.add_option("-v", "--hdp-version", dest="hdp_version", default="",
+ help="hdp-version used in path of tarballs")
+
+ (options, args) = parser.parse_args()
+
+ # See if hdfs path prefix is provided on the command line. If yes, use that value, if no
+ # use empty string as default.
+ hdfs_path_prefix = ""
+ if len(args) > 0:
+ hdfs_path_prefix = args[0]
+
+ hdp_version = get_hdp_version()
+
+ def getPropertyValueFromConfigXMLFile(xmlfile, name, defaultValue=None):
+ xmldoc = minidom.parse(xmlfile)
+ propNodes = [node.parentNode for node in xmldoc.getElementsByTagName("name") if node.childNodes[0].nodeValue == name]
+ if len(propNodes) > 0:
+ for node in propNodes[-1].childNodes:
+ if node.nodeName == "value":
+ if len(node.childNodes) > 0:
+ return node.childNodes[0].nodeValue
+ else:
+ return defaultValue
+ return defaultValue
+
+ def get_fs_root(fsdefaultName=None):
+ fsdefaultName = "fake"
+
+ while True:
+ fsdefaultName = getPropertyValueFromConfigXMLFile("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")
+
+ if fsdefaultName and fsdefaultName.startswith("wasb://"):
+ break
+
+ print "Waiting to read appropriate value of fs.defaultFS from /etc/hadoop/conf/core-site.xml ..."
+ time.sleep(10)
+ pass
+
+ print "Returning fs.defaultFS -> " + fsdefaultName
+ return fsdefaultName
- while (not fsdefaultName.startswith("wasb://")):
- fsdefaultName = getPropertyValueFromConfigXMLFile("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")
- if fsdefaultName is None:
- fsdefaultName = "fake"
- print "Waiting to read appropriate value of fs.defaultFS from /etc/hadoop/conf/core-site.xml ..."
- time.sleep(10)
- pass
-
- print "Returning fs.defaultFS -> " + fsdefaultName
- return fsdefaultName
-
-# These values must be the suffix of the properties in cluster-env.xml
-TAR_SOURCE_SUFFIX = "_tar_source"
-TAR_DESTINATION_FOLDER_SUFFIX = "_tar_destination_folder"
-
-class params:
- hdfs_user = "hdfs"
- mapred_user ="mapred"
- hadoop_bin_dir="/usr/hdp/current/hadoop-client/bin"
- hadoop_conf_dir = "/etc/hadoop/conf"
- user_group = "hadoop"
- security_enabled = False
- oozie_user = "oozie"
- execute_path = "/usr/hdp/current/hadoop-client/bin"
- ambari_libs_dir = "/var/lib/ambari-agent/lib"
- hdfs_site = ConfigDictionary({'dfs.webhdfs.enabled':False,
- })
- fs_default = get_fs_root()
- oozie_env_sh_template = \
-'''
-#!/bin/bash
-
-export OOZIE_CONFIG=${OOZIE_CONFIG:-/usr/hdp/current/oozie/conf}
-export OOZIE_DATA=${OOZIE_DATA:-/var/lib/oozie/data}
-export OOZIE_LOG=${OOZIE_LOG:-/var/log/oozie}
-export CATALINA_BASE=${CATALINA_BASE:-/usr/hdp/current/oozie-server/oozie-server}
-export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
-export CATALINA_PID=${CATALINA_PID:-/var/run/oozie/oozie.pid}
-export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
-'''
+ # These values must be the suffix of the properties in cluster-env.xml
+ TAR_SOURCE_SUFFIX = "_tar_source"
+ TAR_DESTINATION_FOLDER_SUFFIX = "_tar_destination_folder"
- HdfsResource = functools.partial(
- HdfsResource,
- user=hdfs_user,
- security_enabled = False,
- keytab = None,
- kinit_path_local = None,
- hadoop_bin_dir = hadoop_bin_dir,
- hadoop_conf_dir = hadoop_conf_dir,
- principal_name = None,
- hdfs_site = hdfs_site,
- default_fs = fs_default
- )
-
-def _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed):
- """
- :param source_and_dest_pairs: List of tuples (x, y), where x is the source file in the local file system,
- and y is the destination file path in HDFS
- :param file_owner: Owner to set for the file copied to HDFS (typically hdfs account)
- :param group_owner: Owning group to set for the file copied to HDFS (typically hadoop group)
- :param kinit_if_needed: kinit command if it is needed, otherwise an empty string
- :return: Returns 0 if at least one file was copied and no exceptions occurred, and 1 otherwise.
-
- Must kinit before calling this function.
- """
-
- for (source, destination) in source_and_dest_pairs:
- params.HdfsResource(destination,
- action="create_on_execute",
- type = 'file',
- mode=0444,
- owner=file_owner,
- group=group_owner,
- source=source,
- )
-
-
-def copy_tarballs_to_hdfs(source, dest, hdp_select_component_name, component_user, file_owner, group_owner):
- """
- :param tarball_prefix: Prefix of the tarball must be one of tez, hive, mr, pig
- :param hdp_select_component_name: Component name to get the status to determine the version
- :param component_user: User that will execute the Hadoop commands
- :param file_owner: Owner of the files copied to HDFS (typically hdfs account)
- :param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
- :return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
-
- In order to call this function, params.py must have all of the following,
- hdp_stack_version, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
- hadoop_bin_dir, hadoop_conf_dir, and HdfsDirectory as a partial function.
- """
-
- component_tar_source_file, component_tar_destination_folder = source, dest
-
- if not os.path.exists(component_tar_source_file):
- Logger.warning("Could not find file: %s" % str(component_tar_source_file))
- return 1
-
- # Ubuntu returns: "stdin: is not a tty", as subprocess output.
- tmpfile = tempfile.NamedTemporaryFile()
- out = None
- with open(tmpfile.name, 'r+') as file:
- get_hdp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (hdp_select_component_name, tmpfile.name)
- code, stdoutdata = shell.call(get_hdp_version_cmd)
- out = file.read()
- pass
- if code != 0 or out is None:
- Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
- (get_hdp_version_cmd, str(code), str(out)))
- return 1
-
- matches = re.findall(r"([\d\.]+\-\d+)", out)
- hdp_version = matches[0] if matches and len(matches) > 0 else None
-
- if not hdp_version:
- Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
- return 1
-
- file_name = os.path.basename(component_tar_source_file)
- destination_file = os.path.join(component_tar_destination_folder, file_name)
- destination_file = destination_file.replace("{{ hdp_stack_version }}", hdp_version)
-
-
- kinit_if_needed = ""
- if params.security_enabled:
- kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-
- if kinit_if_needed:
- Execute(kinit_if_needed,
- user=component_user,
- path='/bin'
+ class params:
+ hdfs_user = "hdfs"
+ mapred_user ="mapred"
+ hadoop_bin_dir="/usr/hdp/" + hdp_version + "/hadoop-client/bin"
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ user_group = "hadoop"
+ security_enabled = False
+ oozie_user = "oozie"
+ execute_path = "/usr/hdp/" + hdp_version + "/hadoop-client/bin"
+ ambari_libs_dir = "/var/lib/ambari-agent/lib"
+ hdfs_site = ConfigDictionary({'dfs.webhdfs.enabled':False,
+ })
+ fs_default = get_fs_root()
+ oozie_env_sh_template = \
+ '''
+ #!/bin/bash
+
+ export OOZIE_CONFIG=${{OOZIE_CONFIG:-/usr/hdp/{0}/oozie/conf}}
+ export OOZIE_DATA=${{OOZIE_DATA:-/var/lib/oozie/data}}
+ export OOZIE_LOG=${{OOZIE_LOG:-/var/log/oozie}}
+ export CATALINA_BASE=${{CATALINA_BASE:-/usr/hdp/{0}/oozie/oozie-server}}
+ export CATALINA_TMPDIR=${{CATALINA_TMPDIR:-/var/tmp/oozie}}
+ export CATALINA_PID=${{CATALINA_PID:-/var/run/oozie/oozie.pid}}
+ export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+ '''.format(hdp_version)
+
+ HdfsResource = functools.partial(
+ HdfsResource,
+ user=hdfs_user,
+ security_enabled = False,
+ keytab = None,
+ kinit_path_local = None,
+ hadoop_bin_dir = hadoop_bin_dir,
+ hadoop_conf_dir = hadoop_conf_dir,
+ principal_name = None,
+ hdfs_site = hdfs_site,
+ default_fs = fs_default
)
-
- source_and_dest_pairs = [(component_tar_source_file, destination_file), ]
- return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
+
+ def _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed):
+ """
+ :param source_and_dest_pairs: List of tuples (x, y), where x is the source file in the local file system,
+ and y is the destination file path in HDFS
+ :param file_owner: Owner to set for the file copied to HDFS (typically hdfs account)
+ :param group_owner: Owning group to set for the file copied to HDFS (typically hadoop group)
+ :param kinit_if_needed: kinit command if it is needed, otherwise an empty string
+ :return: Returns 0 if at least one file was copied and no exceptions occurred, and 1 otherwise.
+
+ Must kinit before calling this function.
+ """
+
+ for (source, destination) in source_and_dest_pairs:
+ params.HdfsResource(destination,
+ action="create_on_execute",
+ type = 'file',
+ mode=0444,
+ owner=file_owner,
+ group=group_owner,
+ source=source,
+ )
+
+
+ def copy_tarballs_to_hdfs(source, dest, hdp_select_component_name, component_user, file_owner, group_owner):
+ """
+ :param tarball_prefix: Prefix of the tarball must be one of tez, hive, mr, pig
+ :param hdp_select_component_name: Component name to get the status to determine the version
+ :param component_user: User that will execute the Hadoop commands
+ :param file_owner: Owner of the files copied to HDFS (typically hdfs account)
+ :param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
+ :return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
+
+ In order to call this function, params.py must have all of the following,
+ hdp_stack_version, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
+ hadoop_bin_dir, hadoop_conf_dir, and HdfsDirectory as a partial function.
+ """
+
+ component_tar_source_file, component_tar_destination_folder = source, dest
+
+ if not os.path.exists(component_tar_source_file):
+ Logger.warning("Could not find file: %s" % str(component_tar_source_file))
+ return 1
+
+
+
+ file_name = os.path.basename(component_tar_source_file)
+ destination_file = os.path.join(component_tar_destination_folder, file_name)
+ destination_file = destination_file.replace("{{ hdp_stack_version }}", hdp_version)
+
+
+ kinit_if_needed = ""
+ if params.security_enabled:
+ kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+
+ if kinit_if_needed:
+ Execute(kinit_if_needed,
+ user=component_user,
+ path='/bin'
+ )
+
+ source_and_dest_pairs = [(component_tar_source_file, destination_file), ]
+ return _copy_files(source_and_dest_pairs, file_owner, group_owner, kinit_if_needed)
-# See if hdfs path prefix is provided on the command line. If yes, use that value, if no
-# use empty string as default.
-hdfs_path_prefix = ""
-if len(sys.argv) == 2:
- hdfs_path_prefix = sys.argv[1]
-hadoop_conf_dir = params.hadoop_conf_dir
-
-oozie_libext_dir = "/usr/hdp/current/oozie-server/libext"
-oozie_home="/usr/hdp/current/oozie-server"
-oozie_setup_sh="/usr/hdp/current/oozie-server/bin/oozie-setup.sh"
-oozie_tmp_dir = "/var/tmp/oozie"
-configure_cmds = []
-configure_cmds.append(('tar','-xvf', oozie_home + '/oozie-sharelib.tar.gz','-C', oozie_home))
-configure_cmds.append(('cp', "/usr/share/HDP-oozie/ext-2.2.zip", "/usr/hdp/current/oozie-server/libext"))
-configure_cmds.append(('chown', 'oozie:hadoop', oozie_libext_dir + "/ext-2.2.zip"))
-
-no_op_test = "ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1"
-with Environment() as env:
env.set_params(params)
+ hadoop_conf_dir = params.hadoop_conf_dir
+
+ oozie_libext_dir = format("/usr/hdp/{hdp_version}/oozie/libext")
+ oozie_home=format("/usr/hdp/{hdp_version}/oozie")
+ oozie_setup_sh=format("/usr/hdp/{hdp_version}/oozie/bin/oozie-setup.sh")
+ oozie_tmp_dir = "/var/tmp/oozie"
+ configure_cmds = []
+ configure_cmds.append(('tar','-xvf', oozie_home + '/oozie-sharelib.tar.gz','-C', oozie_home))
+ configure_cmds.append(('cp', "/usr/share/HDP-oozie/ext-2.2.zip", format("/usr/hdp/{hdp_version}/oozie/libext")))
+ configure_cmds.append(('chown', 'oozie:hadoop', oozie_libext_dir + "/ext-2.2.zip"))
+
+ no_op_test = "ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1"
File("/etc/oozie/conf/oozie-env.sh",
owner=params.oozie_user,
@@ -248,7 +268,7 @@ with Environment() as env:
mode = 0644,
)
- oozie_shared_lib = format("/usr/hdp/current/oozie-server/share")
+ oozie_shared_lib = format("/usr/hdp/{hdp_version}/oozie/share")
oozie_user = 'oozie'
oozie_hdfs_user_dir = format("{hdfs_path_prefix}/user/{oozie_user}")
kinit_if_needed = ''
@@ -268,12 +288,12 @@ with Environment() as env:
)
print "Copying tarballs..."
- copy_tarballs_to_hdfs("/usr/hdp/current/hadoop-client/mapreduce.tar.gz", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
- copy_tarballs_to_hdfs("/usr/hdp/current/tez-client/lib/tez.tar.gz", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
- copy_tarballs_to_hdfs("/usr/hdp/current/hive-client/hive.tar.gz", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
- copy_tarballs_to_hdfs("/usr/hdp/current/pig-client/pig.tar.gz", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
- copy_tarballs_to_hdfs("/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
- copy_tarballs_to_hdfs("/usr/hdp/current/sqoop-client/sqoop.tar.gz", hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hadoop/mapreduce.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/tez/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hive/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+ copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
# jar shouldn't be used before (read comment below)
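For context, the path rewriting performed by the hunks above can be approximated with plain str.format; the script itself uses the resource_management format() helper, which substitutes hdp_version from the surrounding scope, and the version string below is a made-up example:

    # Illustrative approximation of the new versioned paths.
    hdp_version = "2.3.4.0-3485"  # hypothetical build; normally taken from -v or hdp-select

    oozie_home = "/usr/hdp/{0}/oozie".format(hdp_version)
    oozie_libext_dir = "/usr/hdp/{0}/oozie/libext".format(hdp_version)
    mapreduce_tarball = "/usr/hdp/{0}/hadoop/mapreduce.tar.gz".format(hdp_version)

    # HDFS destinations keep the literal "{{ hdp_stack_version }}" marker;
    # copy_tarballs_to_hdfs() swaps it for the detected version before uploading.
    destination = "/hdp/apps/{{ hdp_stack_version }}/mapreduce/mapreduce.tar.gz"
    destination = destination.replace("{{ hdp_stack_version }}", hdp_version)
    print(destination)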
[2/2] ambari git commit: AMBARI-14285. Modify Ambaripreupload.py to
upload tarballs lib dirs etc. during upgrade (aonishuk)
Posted by ao...@apache.org.
AMBARI-14285. Modify Ambaripreupload.py to upload tarballs lib dirs etc. during upgrade (aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/74ca65a7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/74ca65a7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/74ca65a7
Branch: refs/heads/branch-2.2
Commit: 74ca65a7aaec437d3360946eebba6f3d57805a33
Parents: 6054d79
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Dec 9 11:35:22 2015 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Dec 9 11:35:22 2015 +0200
----------------------------------------------------------------------
.../main/resources/scripts/Ambaripreupload.py | 362 ++++++++++---------
1 file changed, 191 insertions(+), 171 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/74ca65a7/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------