Posted to commits@ambari.apache.org by jl...@apache.org on 2016/03/10 00:50:16 UTC
[24/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)
AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7221e5a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7221e5a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7221e5a
Branch: refs/heads/AMBARI-13364
Commit: f7221e5a601abb74977389f453754432f4008616
Parents: 2efe894
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Mar 8 15:33:30 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Mar 8 15:33:30 2016 -0800
----------------------------------------------------------------------
.../ambari_agent/HostCheckReportFileHandler.py | 4 +-
.../libraries/functions/__init__.py | 4 +-
.../libraries/functions/conf_select.py | 26 +-
.../libraries/functions/copy_tarball.py | 22 +-
.../dynamic_variable_interpretation.py | 32 +-
.../libraries/functions/get_hdp_version.py | 91 ------
.../libraries/functions/get_lzo_packages.py | 6 +-
.../libraries/functions/get_stack_version.py | 91 ++++++
.../libraries/functions/hdp_select.py | 307 -------------------
.../libraries/functions/install_hdp_msi.py | 215 -------------
.../libraries/functions/install_windows_msi.py | 215 +++++++++++++
.../libraries/functions/setup_ranger_plugin.py | 8 +-
.../functions/setup_ranger_plugin_xml.py | 28 +-
.../libraries/functions/stack_select.py | 307 +++++++++++++++++++
.../libraries/functions/version.py | 8 +-
.../libraries/functions/version_select_util.py | 10 +-
.../libraries/script/script.py | 50 +--
.../package/scripts/accumulo_client.py | 6 +-
.../package/scripts/accumulo_script.py | 22 +-
.../1.6.1.2.2.0/package/scripts/params.py | 12 +-
.../0.1.0.2.3/package/scripts/atlas_client.py | 6 +-
.../package/scripts/metadata_server.py | 8 +-
.../ATLAS/0.1.0.2.3/package/scripts/params.py | 4 +-
.../0.5.0.2.1/package/scripts/falcon_client.py | 6 +-
.../0.5.0.2.1/package/scripts/falcon_server.py | 6 +-
.../0.5.0.2.1/package/scripts/params_linux.py | 12 +-
.../0.5.0.2.1/package/scripts/status_params.py | 2 +-
.../1.4.0.2.0/package/scripts/flume_handler.py | 6 +-
.../FLUME/1.4.0.2.0/package/scripts/params.py | 6 +-
.../1.4.0.2.0/package/scripts/params_linux.py | 2 +-
.../HAWQ/2.0.0/package/scripts/hawqmaster.py | 2 +-
.../0.96.0.2.0/package/scripts/hbase_client.py | 10 +-
.../0.96.0.2.0/package/scripts/params_linux.py | 10 +-
.../package/scripts/phoenix_queryserver.py | 6 +-
.../0.96.0.2.0/package/scripts/status_params.py | 2 +-
.../HBASE/0.96.0.2.0/package/scripts/upgrade.py | 8 +-
.../HDFS/2.1.0.2.0/package/scripts/datanode.py | 8 +-
.../2.1.0.2.0/package/scripts/hdfs_client.py | 6 +-
.../2.1.0.2.0/package/scripts/journalnode.py | 8 +-
.../HDFS/2.1.0.2.0/package/scripts/namenode.py | 8 +-
.../2.1.0.2.0/package/scripts/nfsgateway.py | 8 +-
.../2.1.0.2.0/package/scripts/params_linux.py | 20 +-
.../HDFS/2.1.0.2.0/package/scripts/snamenode.py | 8 +-
.../HDFS/2.1.0.2.0/package/scripts/utils.py | 6 +-
.../0.12.0.2.0/package/scripts/hcat_client.py | 4 +-
.../HIVE/0.12.0.2.0/package/scripts/hive.py | 4 +-
.../0.12.0.2.0/package/scripts/hive_client.py | 6 +-
.../package/scripts/hive_metastore.py | 12 +-
.../0.12.0.2.0/package/scripts/hive_server.py | 10 +-
.../package/scripts/hive_server_interactive.py | 6 +-
.../package/scripts/hive_server_upgrade.py | 16 +-
.../0.12.0.2.0/package/scripts/params_linux.py | 16 +-
.../package/scripts/params_windows.py | 2 +-
.../0.12.0.2.0/package/scripts/status_params.py | 8 +-
.../HIVE/0.12.0.2.0/package/scripts/webhcat.py | 2 +-
.../package/scripts/webhcat_server.py | 6 +-
.../KAFKA/0.8.1.2.2/package/scripts/kafka.py | 4 +-
.../0.8.1.2.2/package/scripts/kafka_broker.py | 18 +-
.../KAFKA/0.8.1.2.2/package/scripts/params.py | 16 +-
.../KNOX/0.5.0.2.2/package/scripts/knox.py | 4 +-
.../0.5.0.2.2/package/scripts/knox_gateway.py | 10 +-
.../0.5.0.2.2/package/scripts/params_linux.py | 18 +-
.../0.5.0.2.2/package/scripts/status_params.py | 2 +-
.../KNOX/0.5.0.2.2/package/scripts/upgrade.py | 4 +-
.../1.0.0.2.3/package/scripts/mahout_client.py | 4 +-
.../MAHOUT/1.0.0.2.3/package/scripts/params.py | 10 +-
.../OOZIE/4.0.0.2.0/package/scripts/oozie.py | 4 +-
.../4.0.0.2.0/package/scripts/oozie_client.py | 6 +-
.../4.0.0.2.0/package/scripts/oozie_server.py | 18 +-
.../package/scripts/oozie_server_upgrade.py | 12 +-
.../4.0.0.2.0/package/scripts/params_linux.py | 18 +-
.../4.0.0.2.0/package/scripts/status_params.py | 2 +-
.../0.12.0.2.0/package/scripts/params_linux.py | 12 +-
.../0.12.0.2.0/package/scripts/pig_client.py | 6 +-
.../0.12.0.2.0/package/scripts/service_check.py | 2 +-
.../RANGER/0.4.0/package/scripts/params.py | 10 +-
.../0.4.0/package/scripts/ranger_admin.py | 6 +-
.../RANGER/0.4.0/package/scripts/upgrade.py | 4 +-
.../0.5.0.2.3/package/scripts/params.py | 6 +-
.../0.5.0.2.3/package/scripts/upgrade.py | 4 +-
.../SLIDER/0.60.0.2.2/package/scripts/params.py | 4 +-
.../0.60.0.2.2/package/scripts/params_linux.py | 6 +-
.../0.60.0.2.2/package/scripts/service_check.py | 2 +-
.../SLIDER/0.60.0.2.2/package/scripts/slider.py | 2 +-
.../0.60.0.2.2/package/scripts/slider_client.py | 8 +-
.../package/scripts/job_history_server.py | 10 +-
.../SPARK/1.2.0.2.2/package/scripts/params.py | 18 +-
.../1.2.0.2.2/package/scripts/setup_spark.py | 6 +-
.../1.2.0.2.2/package/scripts/spark_client.py | 8 +-
.../1.2.0.2.2/package/scripts/spark_service.py | 8 +-
.../package/scripts/spark_thrift_server.py | 8 +-
.../1.4.4.2.0/package/scripts/params_linux.py | 6 +-
.../1.4.4.2.0/package/scripts/sqoop_client.py | 8 +-
.../0.9.1.2.1/package/scripts/drpc_server.py | 8 +-
.../STORM/0.9.1.2.1/package/scripts/nimbus.py | 10 +-
.../0.9.1.2.1/package/scripts/nimbus_prod.py | 10 +-
.../0.9.1.2.1/package/scripts/params_linux.py | 10 +-
.../0.9.1.2.1/package/scripts/params_windows.py | 2 +-
.../STORM/0.9.1.2.1/package/scripts/rest_api.py | 4 +-
.../0.9.1.2.1/package/scripts/status_params.py | 2 +-
.../STORM/0.9.1.2.1/package/scripts/storm.py | 2 +-
.../0.9.1.2.1/package/scripts/supervisor.py | 10 +-
.../package/scripts/supervisor_prod.py | 10 +-
.../0.9.1.2.1/package/scripts/ui_server.py | 8 +-
.../0.4.0.2.1/package/scripts/params_linux.py | 12 +-
.../0.4.0.2.1/package/scripts/params_windows.py | 8 +-
.../0.4.0.2.1/package/scripts/pre_upgrade.py | 4 +-
.../0.4.0.2.1/package/scripts/service_check.py | 2 +-
.../TEZ/0.4.0.2.1/package/scripts/tez_client.py | 10 +-
.../scripts/application_timeline_server.py | 8 +-
.../2.1.0.2.0/package/scripts/historyserver.py | 10 +-
.../package/scripts/mapreduce2_client.py | 6 +-
.../2.1.0.2.0/package/scripts/nodemanager.py | 8 +-
.../2.1.0.2.0/package/scripts/params_linux.py | 16 +-
.../package/scripts/resourcemanager.py | 8 +-
.../2.1.0.2.0/package/scripts/service_check.py | 2 +-
.../YARN/2.1.0.2.0/package/scripts/yarn.py | 2 +-
.../2.1.0.2.0/package/scripts/yarn_client.py | 6 +-
.../3.4.5.2.0/package/scripts/params_linux.py | 6 +-
.../3.4.5.2.0/package/scripts/status_params.py | 2 +-
.../3.4.5.2.0/package/scripts/zookeeper.py | 8 +-
.../package/scripts/zookeeper_client.py | 8 +-
.../package/scripts/zookeeper_server.py | 8 +-
.../package/scripts/zookeeper_service.py | 8 +-
.../custom_actions/scripts/install_packages.py | 18 +-
.../custom_actions/scripts/ru_set_all.py | 18 +-
.../main/resources/scripts/Ambaripreupload.py | 56 ++--
.../0.8/services/HIVE/package/scripts/params.py | 6 +-
.../2.0.6/hooks/after-INSTALL/scripts/params.py | 10 +-
.../scripts/shared_initialization.py | 8 +-
.../2.0.6/hooks/before-ANY/scripts/params.py | 14 +-
.../before-ANY/scripts/shared_initialization.py | 4 +-
.../hooks/before-INSTALL/scripts/params.py | 4 +-
.../scripts/shared_initialization.py | 2 +-
.../2.0.6/hooks/before-START/scripts/params.py | 20 +-
.../services/ECS/package/scripts/params.py | 8 +-
ambari-server/src/test/python/TestVersion.py | 4 +-
.../custom_actions/TestInstallPackages.py | 20 +-
.../AMBARI_METRICS/test_metrics_collector.py | 4 +-
.../AMBARI_METRICS/test_metrics_grafana.py | 2 +-
.../python/stacks/2.0.6/FLUME/test_flume.py | 28 +-
.../stacks/2.0.6/FLUME/test_service_check.py | 2 +-
.../2.0.6/GANGLIA/test_ganglia_monitor.py | 10 +-
.../stacks/2.0.6/GANGLIA/test_ganglia_server.py | 8 +-
.../stacks/2.0.6/HBASE/test_hbase_client.py | 8 +-
.../stacks/2.0.6/HBASE/test_hbase_master.py | 40 +--
.../2.0.6/HBASE/test_hbase_regionserver.py | 32 +-
.../2.0.6/HBASE/test_hbase_service_check.py | 6 +-
.../2.0.6/HBASE/test_phoenix_queryserver.py | 16 +-
.../python/stacks/2.0.6/HDFS/test_datanode.py | 44 +--
.../stacks/2.0.6/HDFS/test_hdfs_client.py | 18 +-
.../stacks/2.0.6/HDFS/test_journalnode.py | 32 +-
.../python/stacks/2.0.6/HDFS/test_namenode.py | 84 ++---
.../python/stacks/2.0.6/HDFS/test_nfsgateway.py | 24 +-
.../stacks/2.0.6/HDFS/test_service_check.py | 4 +-
.../python/stacks/2.0.6/HDFS/test_snamenode.py | 22 +-
.../test/python/stacks/2.0.6/HDFS/test_zkfc.py | 22 +-
.../stacks/2.0.6/HIVE/test_hcat_client.py | 6 +-
.../stacks/2.0.6/HIVE/test_hive_client.py | 8 +-
.../stacks/2.0.6/HIVE/test_hive_metastore.py | 20 +-
.../stacks/2.0.6/HIVE/test_hive_server.py | 72 ++---
.../2.0.6/HIVE/test_hive_service_check.py | 6 +-
.../stacks/2.0.6/HIVE/test_mysql_server.py | 16 +-
.../stacks/2.0.6/HIVE/test_webhcat_server.py | 28 +-
.../stacks/2.0.6/OOZIE/test_oozie_client.py | 10 +-
.../stacks/2.0.6/OOZIE/test_oozie_server.py | 40 +--
.../2.0.6/OOZIE/test_oozie_service_check.py | 2 +-
.../stacks/2.0.6/OOZIE/test_service_check.py | 4 +-
.../python/stacks/2.0.6/PIG/test_pig_client.py | 10 +-
.../stacks/2.0.6/PIG/test_pig_service_check.py | 4 +-
.../stacks/2.0.6/SQOOP/test_service_check.py | 4 +-
.../python/stacks/2.0.6/SQOOP/test_sqoop.py | 6 +-
.../stacks/2.0.6/YARN/test_historyserver.py | 28 +-
.../stacks/2.0.6/YARN/test_mapreduce2_client.py | 12 +-
.../2.0.6/YARN/test_mapreduce2_service_check.py | 4 +-
.../stacks/2.0.6/YARN/test_nodemanager.py | 34 +-
.../stacks/2.0.6/YARN/test_resourcemanager.py | 32 +-
.../stacks/2.0.6/YARN/test_yarn_client.py | 14 +-
.../2.0.6/YARN/test_yarn_service_check.py | 4 +-
.../2.0.6/ZOOKEEPER/test_zookeeper_client.py | 8 +-
.../2.0.6/ZOOKEEPER/test_zookeeper_server.py | 28 +-
.../ZOOKEEPER/test_zookeeper_service_check.py | 6 +-
.../hooks/after-INSTALL/test_after_install.py | 2 +-
.../stacks/2.1/FALCON/test_falcon_client.py | 10 +-
.../stacks/2.1/FALCON/test_falcon_server.py | 22 +-
.../stacks/2.1/FALCON/test_service_check.py | 4 +-
.../stacks/2.1/HIVE/test_hive_metastore.py | 58 ++--
.../stacks/2.1/STORM/test_service_check.py | 2 +-
.../stacks/2.1/STORM/test_storm_drpc_server.py | 26 +-
.../2.1/STORM/test_storm_jaas_configuration.py | 8 +-
.../stacks/2.1/STORM/test_storm_nimbus.py | 28 +-
.../stacks/2.1/STORM/test_storm_nimbus_prod.py | 16 +-
.../2.1/STORM/test_storm_rest_api_service.py | 12 +-
.../stacks/2.1/STORM/test_storm_supervisor.py | 16 +-
.../2.1/STORM/test_storm_supervisor_prod.py | 16 +-
.../stacks/2.1/STORM/test_storm_ui_server.py | 24 +-
.../python/stacks/2.1/TEZ/test_service_check.py | 4 +-
.../python/stacks/2.1/TEZ/test_tez_client.py | 20 +-
.../stacks/2.1/YARN/test_apptimelineserver.py | 22 +-
.../stacks/2.2/ACCUMULO/test_accumulo_client.py | 4 +-
.../stacks/2.2/KAFKA/test_kafka_broker.py | 8 +-
.../stacks/2.2/KERBEROS/test_kerberos_client.py | 14 +-
.../stacks/2.2/KERBEROS/test_kerberos_server.py | 8 +-
.../python/stacks/2.2/KNOX/test_knox_gateway.py | 22 +-
.../stacks/2.2/PIG/test_pig_service_check.py | 2 +-
.../stacks/2.2/RANGER/test_ranger_admin.py | 14 +-
.../stacks/2.2/RANGER/test_ranger_usersync.py | 16 +-
.../stacks/2.2/SLIDER/test_slider_client.py | 10 +-
.../stacks/2.2/SPARK/test_job_history_server.py | 16 +-
.../stacks/2.2/SPARK/test_spark_client.py | 8 +-
.../2.2/SPARK/test_spark_service_check.py | 6 +-
.../stacks/2.3/ATLAS/test_metadata_server.py | 8 +-
.../python/stacks/2.3/HAWQ/test_hawqmaster.py | 10 +-
.../python/stacks/2.3/HAWQ/test_hawqsegment.py | 8 +-
.../python/stacks/2.3/HAWQ/test_hawqstandby.py | 8 +-
.../stacks/2.3/MAHOUT/test_mahout_client.py | 6 +-
.../2.3/MAHOUT/test_mahout_service_check.py | 2 +-
.../src/test/python/stacks/2.3/PXF/test_pxf.py | 10 +-
.../2.3/SPARK/test_spark_thrift_server.py | 10 +-
.../stacks/2.3/STORM/test_service_check.py | 2 +-
.../stacks/2.3/STORM/test_storm_upgrade.py | 4 +-
.../test/python/stacks/2.3/YARN/test_ats_1_5.py | 6 +-
.../src/test/python/stacks/utils/RMFTestCase.py | 4 +-
223 files changed, 1868 insertions(+), 1868 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
index 1f87a73..ee7db0a 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
@@ -87,7 +87,7 @@ class HostCheckReportFileHandler:
logger.error("Can't write host check file at %s :%s " % (self.hostCheckCustomActionsFilePath, err.message))
traceback.print_exc()
- def _hdp_list_directory(self):
+ def _stack_list_directory(self):
"""
Return filtered list of /usr/hdp directory allowed to be removed
:rtype list
@@ -152,7 +152,7 @@ class HostCheckReportFileHandler:
items = []
for itemDetail in hostInfo['stackFoldersAndFiles']:
items.append(itemDetail['name'])
- items += self._hdp_list_directory()
+ items += self._stack_list_directory()
config.add_section('directories')
config.set('directories', 'dir_list', ','.join(items))
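
For reference, a minimal standalone sketch of the dir_list serialization performed above, using Python 2's ConfigParser (the sample paths are illustrative, not taken from a live host):

import ConfigParser
import StringIO

items = ['/usr/hdp/2.3.0.0-2557', '/usr/hdp/current']
config = ConfigParser.RawConfigParser()
config.add_section('directories')
config.set('directories', 'dir_list', ','.join(items))

buf = StringIO.StringIO()
config.write(buf)
print(buf.getvalue())
# [directories]
# dir_list = /usr/hdp/2.3.0.0-2557,/usr/hdp/current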
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
index 1ab0ff1..e886fe4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
@@ -38,7 +38,7 @@ from resource_management.libraries.functions.hive_check import *
from resource_management.libraries.functions.version import *
from resource_management.libraries.functions.format_jvm_option import *
from resource_management.libraries.functions.constants import *
-from resource_management.libraries.functions.get_hdp_version import *
+from resource_management.libraries.functions.get_stack_version import *
from resource_management.libraries.functions.get_lzo_packages import *
from resource_management.libraries.functions.setup_ranger_plugin import *
from resource_management.libraries.functions.curl_krb_request import *
@@ -47,6 +47,6 @@ IS_WINDOWS = platform.system() == "Windows"
if IS_WINDOWS:
from resource_management.libraries.functions.windows_service_utils import *
- from resource_management.libraries.functions.install_hdp_msi import *
+ from resource_management.libraries.functions.install_windows_msi import *
from resource_management.libraries.functions.install_jdbc_driver import *
from resource_management.libraries.functions.reload_windows_env import *
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index dc7fa6e..59c717b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -22,7 +22,7 @@ __all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir"]
import os
import version
-import hdp_select
+import stack_select
import subprocess
from resource_management.core import shell
@@ -34,7 +34,7 @@ from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import Link
from resource_management.libraries.functions.default import default
from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.core.shell import as_sudo
@@ -192,7 +192,7 @@ def _valid(stack_name, package, ver):
if stack_name != "HDP":
return False
- if version.compare_versions(version.format_hdp_stack_version(ver), "2.3.0.0") < 0:
+ if version.compare_versions(version.format_stack_version(ver), "2.3.0.0") < 0:
return False
return True
@@ -298,10 +298,10 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
if not Script.in_stack_upgrade():
# During normal operation, the HDP stack must be 2.3 or higher
- if Script.is_hdp_stack_greater_or_equal("2.2"):
+ if Script.is_stack_greater_or_equal("2.2"):
hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
- if Script.is_hdp_stack_greater_or_equal("2.3"):
+ if Script.is_stack_greater_or_equal("2.3"):
hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
@@ -326,16 +326,16 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
EU/RU | 2.3 | 2.3.* | Any | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
'''
- # The method "is_hdp_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
+ # The method "is_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
# In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
# In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
# rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
- if Script.is_hdp_stack_greater_or_equal("2.2"):
+ if Script.is_stack_greater_or_equal("2.2"):
hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
# This contains the "version", including the build number, that is actually used during a stack upgrade and
# is the version upgrading/downgrading to.
- stack_info = hdp_select._get_upgrade_stack()
+ stack_info = stack_select._get_upgrade_stack()
if stack_info is not None:
stack_name = stack_info[0]
@@ -345,14 +345,14 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
# This is the version either upgrading or downgrading to.
- if compare_versions(format_hdp_stack_version(version), "2.3.0.0") >= 0:
+ if compare_versions(format_stack_version(version), "2.3.0.0") >= 0:
# Determine if hdp-select has been run and if not, then use the current
# hdp version until this component is upgraded.
if not force_latest_on_upgrade:
- current_hdp_version = hdp_select.get_role_component_current_hdp_version()
- if current_hdp_version is not None and version != current_hdp_version:
- version = current_hdp_version
- Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_hdp_version))
+ current_stack_version = stack_select.get_role_component_current_stack_version()
+ if current_stack_version is not None and version != current_stack_version:
+ version = current_stack_version
+ Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_stack_version))
# Only change the hadoop_conf_dir path, don't conf-select this older version
hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(version)
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index b4c8bc8..647b8b6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -61,7 +61,7 @@ TARBALL_MAP = {
}
-def _get_single_version_from_hdp_select():
+def _get_single_version_from_stack_select():
"""
Call "hdp-select versions" and return the version string if only one version is available.
:return: Returns a version string if successful, and None otherwise.
@@ -70,12 +70,12 @@ def _get_single_version_from_hdp_select():
tmpfile = tempfile.NamedTemporaryFile()
tmp_dir = Script.get_tmp_dir()
tmp_file = os.path.join(tmp_dir, "copy_tarball_out.txt")
- hdp_version = None
+ stack_version = None
out = None
- get_hdp_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
+ get_stack_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
try:
- code, stdoutdata = shell.call(get_hdp_versions_cmd, logoutput=True)
+ code, stdoutdata = shell.call(get_stack_versions_cmd, logoutput=True)
with open(tmp_file, 'r+') as file:
out = file.read()
except Exception, e:
@@ -88,17 +88,17 @@ def _get_single_version_from_hdp_select():
Logger.logger.exception("Could not remove file {0}. Error: {1}".format(str(tmp_file), str(e)))
if code != 0 or out is None or out == "":
- Logger.error("Could not verify HDP version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_hdp_versions_cmd, str(code), str(out)))
+ Logger.error("Could not verify HDP version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
return None
matches = re.findall(r"([\d\.]+\-\d+)", out)
if matches and len(matches) == 1:
- hdp_version = matches[0]
+ stack_version = matches[0]
elif matches and len(matches) > 1:
Logger.error("Found multiple matches for HDP version, cannot identify the correct one from: {0}".format(", ".join(matches)))
- return hdp_version
+ return stack_version
def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None, force_execute=False,
use_upgrading_version_during_uprade=True, replace_existing_files=False, host_sys_prepped=False):
@@ -152,10 +152,10 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
if current_version is None:
# During normal operation, the first installation of services won't yet know about the version, so must rely
# on hdp-select to get it.
- hdp_version = _get_single_version_from_hdp_select()
- if hdp_version:
- Logger.info("Will use stack version {0}".format(hdp_version))
- current_version = hdp_version
+ stack_version = _get_single_version_from_stack_select()
+ if stack_version:
+ Logger.info("Will use stack version {0}".format(stack_version))
+ current_version = stack_version
if current_version is None:
message_suffix = "during rolling %s" % str(upgrade_direction) if is_stack_upgrade else ""
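
The single-version detection in _get_single_version_from_stack_select() reduces to the regex below; a quick sketch against illustrative "hdp-select versions" output:

import re

out = "2.3.0.0-2557\n"   # illustrative output of "/usr/bin/hdp-select versions"
matches = re.findall(r"([\d\.]+\-\d+)", out)
stack_version = matches[0] if len(matches) == 1 else None
print(stack_version)     # -> 2.3.0.0-2557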
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
index 31f0c4a..a20b03c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
@@ -35,10 +35,10 @@ from resource_management.core import shell
"""
This file provides helper methods needed for the versioning of RPMs. Specifically, it does dynamic variable
-interpretation to replace strings like {{ hdp_stack_version }} where the value of the
+interpretation to replace strings like {{ stack_version_formatted }} where the value of the
variables cannot be determined ahead of time, but rather, depends on what files are found.
-It assumes that {{ hdp_stack_version }} is constructed as ${major.minor.patch.rev}-${build_number}
+It assumes that {{ stack_version_formatted }} is constructed as ${major.minor.patch.rev}-${build_number}
E.g., 998.2.2.1.0-998
Please note that "-${build_number}" is optional.
"""
@@ -54,10 +54,10 @@ def _get_tar_source_and_dest_folder(tarball_prefix):
:return: Returns a tuple of (x, y) after verifying the properties
"""
component_tar_source_file = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_SOURCE_SUFFIX), None)
- # E.g., /usr/hdp/current/hadoop-client/tez-{{ hdp_stack_version }}.tar.gz
+ # E.g., /usr/hdp/current/hadoop-client/tez-{{ stack_version_formatted }}.tar.gz
component_tar_destination_folder = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_DESTINATION_FOLDER_SUFFIX), None)
- # E.g., hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/
+ # E.g., hdfs:///hdp/apps/{{ stack_version_formatted }}/mapreduce/
if not component_tar_source_file or not component_tar_destination_folder:
Logger.warning("Did not find %s tar source file and destination folder properties in cluster-env.xml" %
@@ -137,10 +137,10 @@ def _copy_files(source_and_dest_pairs, component_user, file_owner, group_owner,
return return_value
-def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_user, file_owner, group_owner, ignore_sysprep=False):
+def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component_user, file_owner, group_owner, ignore_sysprep=False):
"""
:param tarball_prefix: Prefix of the tarball must be one of tez, hive, mr, pig
- :param hdp_select_component_name: Component name to get the status to determine the version
+ :param stack_select_component_name: Component name to get the status to determine the version
:param component_user: User that will execute the Hadoop commands, usually smokeuser
:param file_owner: Owner of the files copied to HDFS (typically hdfs user)
:param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
@@ -148,17 +148,17 @@ def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_u
:return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
In order to call this function, params.py must have all of the following,
- hdp_stack_version, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
+ stack_version_formatted, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
hadoop_bin_dir, hadoop_conf_dir, and HdfsDirectory as a partial function.
"""
import params
if not ignore_sysprep and hasattr(params, "host_sys_prepped") and params.host_sys_prepped:
- Logger.info("Host is sys-prepped. Tarball %s will not be copied for %s." % (tarball_prefix, hdp_select_component_name))
+ Logger.info("Host is sys-prepped. Tarball %s will not be copied for %s." % (tarball_prefix, stack_select_component_name))
return 0
- if not hasattr(params, "hdp_stack_version") or params.hdp_stack_version is None:
- Logger.warning("Could not find hdp_stack_version")
+ if not hasattr(params, "stack_version_formatted") or params.stack_version_formatted is None:
+ Logger.warning("Could not find stack_version_formatted")
return 1
component_tar_source_file, component_tar_destination_folder = _get_tar_source_and_dest_folder(tarball_prefix)
@@ -174,25 +174,25 @@ def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_u
tmpfile = tempfile.NamedTemporaryFile()
out = None
with open(tmpfile.name, 'r+') as file:
- get_hdp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (hdp_select_component_name, tmpfile.name)
- code, stdoutdata = shell.call(get_hdp_version_cmd)
+ get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % (stack_select_component_name, tmpfile.name)
+ code, stdoutdata = shell.call(get_stack_version_cmd)
out = file.read()
pass
if code != 0 or out is None:
Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
- (get_hdp_version_cmd, str(code), str(out)))
+ (get_stack_version_cmd, str(code), str(out)))
return 1
matches = re.findall(r"([\d\.]+\-\d+)", out)
- hdp_version = matches[0] if matches and len(matches) > 0 else None
+ stack_version = matches[0] if matches and len(matches) > 0 else None
- if not hdp_version:
+ if not stack_version:
Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
return 1
file_name = os.path.basename(component_tar_source_file)
destination_file = os.path.join(component_tar_destination_folder, file_name)
- destination_file = destination_file.replace("{{ hdp_stack_version }}", hdp_version)
+ destination_file = destination_file.replace("{{ stack_version_formatted }}", stack_version)
does_hdfs_file_exist_cmd = "fs -ls %s" % destination_file
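
The renamed placeholder is interpolated the same way as before; a short sketch with an illustrative destination path:

stack_version = "2.2.1.0-998"
dest = "hdfs:///hdp/apps/{{ stack_version_formatted }}/mapreduce/mapreduce.tar.gz"
print(dest.replace("{{ stack_version_formatted }}", stack_version))
# -> hdfs:///hdp/apps/2.2.1.0-998/mapreduce/mapreduce.tar.gz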
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
deleted file mode 100644
index a56d33a..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-__all__ = ["get_hdp_version"]
-
-import os
-import re
-
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core import shell
-
-HDP_SELECT_BINARY = "/usr/bin/hdp-select"
-
-@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def get_hdp_version(package_name):
- """
- @param package_name, name of the package, from which, function will try to get hdp version
- """
- try:
- component_home_dir = os.environ[package_name.upper() + "_HOME"]
- except KeyError:
- Logger.info('Skipping get_hdp_version since the component {0} is not yet available'.format(package_name))
- return None # lazy fail
-
- #As a rule, component_home_dir is of the form <hdp_root_dir>\[\]<component_versioned_subdir>[\]
- home_dir_split = os.path.split(component_home_dir)
- iSubdir = len(home_dir_split) - 1
- while not home_dir_split[iSubdir]:
- iSubdir -= 1
-
- #The component subdir is expected to be of the form <package_name>-<package_version>.<hdp_stack_version>
- # with package_version = #.#.# and hdp_stack_version=#.#.#.#-<build_number>
- match = re.findall('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', home_dir_split[iSubdir])
- if not match:
- Logger.info('Failed to get extracted version for component {0}. Home dir not in expected format.'.format(package_name))
- return None # lazy fail
-
- return match[0]
-
-@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def get_hdp_version(package_name):
- """
- @param package_name, name of the package, from which, function will try to get hdp version
- """
-
- if not os.path.exists(HDP_SELECT_BINARY):
- Logger.info('Skipping get_hdp_version since hdp-select is not yet available')
- return None # lazy fail
-
- try:
- command = 'ambari-python-wrap {HDP_SELECT_BINARY} status {package_name}'.format(HDP_SELECT_BINARY=HDP_SELECT_BINARY, package_name=package_name)
- return_code, hdp_output = shell.call(command, timeout=20)
- except Exception, e:
- Logger.error(str(e))
- raise Fail('Unable to execute hdp-select command to retrieve the version.')
-
- if return_code != 0:
- raise Fail(
- 'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
-
- hdp_version = re.sub(package_name + ' - ', '', hdp_output)
- hdp_version = hdp_version.rstrip()
- match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
-
- if match is None:
- Logger.info('Failed to get extracted version with hdp-select')
- return None # lazy fail
-
- return hdp_version
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
index afb4314..870bb0c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
@@ -22,7 +22,7 @@ Ambari Agent
__all__ = ["get_lzo_packages"]
from ambari_commons.os_check import OSCheck
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.format import format
def get_lzo_packages(stack_version_unformatted):
@@ -35,9 +35,9 @@ def get_lzo_packages(stack_version_unformatted):
underscored_version = stack_version_unformatted.replace('.', '_')
dashed_version = stack_version_unformatted.replace('.', '-')
- hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+ stack_version_formatted = format_stack_version(stack_version_unformatted)
- if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+ if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
lzo_packages += ["hadooplzo_*"]
else:
lzo_packages += ["hadoop-lzo"]
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
new file mode 100644
index 0000000..c00b541
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["get_stack_version"]
+
+import os
+import re
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core import shell
+
+STACK_SELECT_BINARY = "/usr/bin/hdp-select"
+
+@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
+def get_stack_version(package_name):
+ """
+ @param package_name, name of the package, from which, function will try to get stack version
+ """
+ try:
+ component_home_dir = os.environ[package_name.upper() + "_HOME"]
+ except KeyError:
+ Logger.info('Skipping get_stack_version since the component {0} is not yet available'.format(package_name))
+ return None # lazy fail
+
+ #As a rule, component_home_dir is of the form <stack_root_dir>\[\]<component_versioned_subdir>[\]
+ home_dir_split = os.path.split(component_home_dir)
+ iSubdir = len(home_dir_split) - 1
+ while not home_dir_split[iSubdir]:
+ iSubdir -= 1
+
+ #The component subdir is expected to be of the form <package_name>-<package_version>.<stack_version>
+ # with package_version = #.#.# and stack_version=#.#.#.#-<build_number>
+ match = re.findall('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', home_dir_split[iSubdir])
+ if not match:
+ Logger.info('Failed to get extracted version for component {0}. Home dir not in expected format.'.format(package_name))
+ return None # lazy fail
+
+ return match[0]
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def get_stack_version(package_name):
+ """
+ @param package_name, name of the package, from which, function will try to get stack version
+ """
+
+ if not os.path.exists(STACK_SELECT_BINARY):
+ Logger.info('Skipping get_stack_version since ' + STACK_SELECT_BINARY + ' is not yet available')
+ return None # lazy fail
+
+ try:
+ command = 'ambari-python-wrap {STACK_SELECT_BINARY} status {package_name}'.format(STACK_SELECT_BINARY=STACK_SELECT_BINARY, package_name=package_name)
+ return_code, stack_output = shell.call(command, timeout=20)
+ except Exception, e:
+ Logger.error(str(e))
+ raise Fail('Unable to execute ' + STACK_SELECT_BINARY + ' command to retrieve the version.')
+
+ if return_code != 0:
+ raise Fail(
+ 'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+ stack_version = re.sub(package_name + ' - ', '', stack_output)
+ stack_version = stack_version.rstrip()
+ match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
+
+ if match is None:
+ Logger.info('Failed to get extracted version with ' + STACK_SELECT_BINARY)
+ return None # lazy fail
+
+ return stack_version
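
A standalone sketch of the status-line parse done by the default-OS get_stack_version() above, against illustrative "hdp-select status" output:

import re

package_name = "hadoop-client"
stack_output = "hadoop-client - 2.3.0.0-2557\n"   # illustrative output
stack_version = re.sub(package_name + ' - ', '', stack_output).rstrip()
match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
print(stack_version if match else None)           # -> 2.3.0.0-2557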
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
deleted file mode 100644
index f5ad7e2..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import sys
-import re
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.script.script import Script
-from resource_management.core.shell import call
-from resource_management.libraries.functions.version import format_hdp_stack_version
-from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
-
-HDP_SELECT = '/usr/bin/hdp-select'
-HDP_SELECT_PREFIX = ('ambari-python-wrap', HDP_SELECT)
-
-# hdp-select set oozie-server 2.2.0.0-1234
-TEMPLATE = HDP_SELECT_PREFIX + ('set',)
-
-# a mapping of Ambari server role to hdp-select component name for all
-# non-clients
-SERVER_ROLE_DIRECTORY_MAP = {
- 'ACCUMULO_MASTER' : 'accumulo-master',
- 'ACCUMULO_MONITOR' : 'accumulo-monitor',
- 'ACCUMULO_GC' : 'accumulo-gc',
- 'ACCUMULO_TRACER' : 'accumulo-tracer',
- 'ACCUMULO_TSERVER' : 'accumulo-tablet',
- 'ATLAS_SERVER' : 'atlas-server',
- 'FLUME_HANDLER' : 'flume-server',
- 'FALCON_SERVER' : 'falcon-server',
- 'NAMENODE' : 'hadoop-hdfs-namenode',
- 'DATANODE' : 'hadoop-hdfs-datanode',
- 'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
- 'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
- 'JOURNALNODE' : 'hadoop-hdfs-journalnode',
- 'HBASE_MASTER' : 'hbase-master',
- 'HBASE_REGIONSERVER' : 'hbase-regionserver',
- 'HIVE_METASTORE' : 'hive-metastore',
- 'HIVE_SERVER' : 'hive-server2',
- 'WEBHCAT_SERVER' : 'hive-webhcat',
- 'KAFKA_BROKER' : 'kafka-broker',
- 'KNOX_GATEWAY' : 'knox-server',
- 'OOZIE_SERVER' : 'oozie-server',
- 'RANGER_ADMIN' : 'ranger-admin',
- 'RANGER_USERSYNC' : 'ranger-usersync',
- 'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
- 'SPARK_THRIFTSERVER' : 'spark-thriftserver',
- 'NIMBUS' : 'storm-nimbus',
- 'SUPERVISOR' : 'storm-supervisor',
- 'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
- 'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
- 'NODEMANAGER' : 'hadoop-yarn-nodemanager',
- 'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
- 'ZOOKEEPER_SERVER' : 'zookeeper-server',
-
- # ZKFC is tied to NN since it doesn't have its own component in hdp-select and there is
- # a requirement that the ZKFC is installed on each NN
- 'ZKFC' : 'hadoop-hdfs-namenode'
-}
-
-# mapping of service check to hdp-select component
-SERVICE_CHECK_DIRECTORY_MAP = {
- "HDFS_SERVICE_CHECK" : "hadoop-client",
- "TEZ_SERVICE_CHECK" : "hadoop-client",
- "PIG_SERVICE_CHECK" : "hadoop-client",
- "HIVE_SERVICE_CHECK" : "hadoop-client",
- "OOZIE_SERVICE_CHECK" : "hadoop-client",
- "MAHOUT_SERVICE_CHECK" : "mahout-client"
-}
-
-# /usr/hdp/current/hadoop-client/[bin|sbin|libexec|lib]
-# /usr/hdp/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
-HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
-
-# /usr/hdp/current/hadoop-client
-# /usr/hdp/2.3.0.0-1234/hadoop
-HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"
-
-HADOOP_DIR_DEFAULTS = {
- "home": "/usr/lib/hadoop",
- "libexec": "/usr/lib/hadoop/libexec",
- "sbin": "/usr/lib/hadoop/sbin",
- "bin": "/usr/bin",
- "lib": "/usr/lib/hadoop/lib"
-}
-
-def select_all(version_to_select):
- """
- Executes hdp-select on every component for the specified version. If the value passed in is a
- stack version such as "2.3", then this will find the latest installed version which
- could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
- that exact version.
- :param version_to_select: the version to hdp-select on, such as "2.3" or "2.3.0.0-1234"
- """
- # it's an error, but it shouldn't really stop anything from working
- if version_to_select is None:
- Logger.error("Unable to execute hdp-select after installing because there was no version specified")
- return
-
- Logger.info("Executing hdp-select set all on {0}".format(version_to_select))
-
- command = format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{version_to_select} | tail -1`')
- only_if_command = format('ls -d /usr/hdp/{version_to_select}*')
- Execute(command, only_if = only_if_command)
-
-
-def select(component, version):
- """
- Executes hdp-select on the specific component and version. Some global
- variables that are imported via params/status_params/params_linux will need
- to be recalculated after the hdp-select. However, python does not re-import
- existing modules. The only way to ensure that the configuration variables are
- recalculated is to call reload(...) on each module that has global parameters.
- After invoking hdp-select, this function will also reload params, status_params,
- and params_linux.
- :param component: the hdp-select component, such as oozie-server. If "all", then all components
- will be updated.
- :param version: the version to set the component to, such as 2.2.0.0-1234
- """
- command = TEMPLATE + (component, version)
- Execute(command, sudo=True)
-
- # don't trust the ordering of modules:
- # 1) status_params
- # 2) params_linux
- # 3) params
- modules = sys.modules
- param_modules = "status_params", "params_linux", "params"
- for moduleName in param_modules:
- if moduleName in modules:
- module = modules.get(moduleName)
- reload(module)
- Logger.info("After {0}, reloaded module {1}".format(command, moduleName))
-
-
-def get_role_component_current_hdp_version():
- """
- Gets the current HDP version of the component that this role command is for.
- :return: the current HDP version of the specified component or None
- """
- hdp_select_component = None
- role = default("/role", "")
- role_command = default("/roleCommand", "")
-
- if role in SERVER_ROLE_DIRECTORY_MAP:
- hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
- elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
- hdp_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
-
- if hdp_select_component is None:
- return None
-
- current_hdp_version = get_hdp_version(hdp_select_component)
-
- if current_hdp_version is None:
- Logger.warning("Unable to determine hdp-select version for {0}".format(
- hdp_select_component))
- else:
- Logger.info("{0} is currently at version {1}".format(
- hdp_select_component, current_hdp_version))
-
- return current_hdp_version
-
-
-def get_hadoop_dir(target, force_latest_on_upgrade=False):
- """
- Return the hadoop shared directory in the following override order
- 1. Use default for 2.1 and lower
- 2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
- 3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
- However, if the upgrade has not yet invoked hdp-select, return the current
- version of the component.
- :target: the target directory
- :force_latest_on_upgrade: if True, then this will return the "current" directory
- without the HDP version built into the path, such as /usr/hdp/current/hadoop-client
- """
-
- if not target in HADOOP_DIR_DEFAULTS:
- raise Fail("Target {0} not defined".format(target))
-
- hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
- if Script.is_hdp_stack_greater_or_equal("2.2"):
- # home uses a different template
- if target == "home":
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
- else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
-
- # if we are not forcing "current" for HDP 2.2, then attempt to determine
- # if the exact version needs to be returned in the directory
- if not force_latest_on_upgrade:
- stack_info = _get_upgrade_stack()
-
- if stack_info is not None:
- stack_version = stack_info[1]
-
- # determine if hdp-select has been run and if not, then use the current
- # hdp version until this component is upgraded
- current_hdp_version = get_role_component_current_hdp_version()
- if current_hdp_version is not None and stack_version != current_hdp_version:
- stack_version = current_hdp_version
-
- if target == "home":
- # home uses a different template
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
- else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
-
- return hadoop_dir
-
-def get_hadoop_dir_for_stack_version(target, stack_version):
- """
- Return the hadoop shared directory for the provided stack version. This is necessary
- when folder paths of downgrade-source stack-version are needed after hdp-select.
- :target: the target directory
- :stack_version: stack version to get hadoop dir for
- """
-
- if not target in HADOOP_DIR_DEFAULTS:
- raise Fail("Target {0} not defined".format(target))
-
- hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
- formatted_stack_version = format_hdp_stack_version(stack_version)
- if Script.is_hdp_stack_greater_or_equal_to(formatted_stack_version, "2.2"):
- # home uses a different template
- if target == "home":
- hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
- else:
- hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
-
- return hadoop_dir
-
-
-def _get_upgrade_stack():
- """
- Gets the stack name and stack version if an upgrade is currently in progress.
- :return: the stack name and stack version as a tuple, or None if an
- upgrade is not in progress.
- """
- from resource_management.libraries.functions.default import default
- direction = default("/commandParams/upgrade_direction", None)
- stack_name = default("/hostLevelParams/stack_name", None)
- stack_version = default("/commandParams/version", None)
-
- if direction and stack_name and stack_version:
- return (stack_name, stack_version)
-
- return None
-
-
-def get_hdp_versions(stack_root):
- """
- Gets list of stack versions installed on the host.
- By default a call to hdp-select versions is made to get the list of installed stack versions.
- As a fallback, the list of installed versions is collected from stack version directories in the stack install root.
- :param stack_root: Stack install root
- :return: Returns list of installed stack versions.
- """
- code, out = call(HDP_SELECT_PREFIX + ('versions',))
- versions = []
- if 0 == code:
- for line in out.splitlines():
- versions.append(line.rstrip('\n'))
- if not versions:
- versions = get_versions_from_stack_root(stack_root)
- return versions
-
-def get_hdp_version_before_install(component_name):
- """
- Works in the similar way to 'hdp-select status component',
- but also works for not yet installed packages.
-
- Note: won't work if doing initial install.
- """
- component_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", component_name)
- if os.path.islink(component_dir):
- hdp_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
- match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
- if match is None:
- Logger.info('Failed to get extracted version with hdp-select in method get_hdp_version_before_install')
- return None # lazy fail
- return hdp_version
- else:
- return None
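
The symlink-based lookup in get_hdp_version_before_install() (which moves to stack_select.py in this commit) reduces to a readlink plus two path operations; a sketch with an illustrative link target:

import os.path

# what os.readlink("/usr/hdp/current/hadoop-client") might return (illustrative)
link_target = "/usr/hdp/2.3.0.0-2557/hadoop"
hdp_version = os.path.basename(os.path.dirname(link_target))
print(hdp_version)   # -> 2.3.0.0-2557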
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
deleted file mode 100644
index 7e94b5d..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
+++ /dev/null
@@ -1,215 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from ambari_commons import os_utils
-from ambari_commons.inet_utils import download_file
-from ambari_commons.os_windows import SystemWideLock
-
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources.system import File
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.reload_windows_env import reload_windows_env
-from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-import socket
-import os
-import glob
-import urlparse
-
-
-__all__ = ['install_windows_msi']
-
-msi_save_dir = None
-hdp_log_dir = "c:\\hadoop\\logs"
-hdp_data_dir = "c:\\hadoopDefaultData"
-local_host = socket.getfqdn()
-db_flavor = "DERBY"
-hdp_22 = """#Namenode Data directory
-HDFS_NAMENODE_DATA_DIR={hdp_data_dir}\\hdpdatann
-
-#Datanode Data directory
-HDFS_DATANODE_DATA_DIR={hdp_data_dir}\\hdpdatadn
-
-IS_SLIDER=yes
-IS_PHOENIX=no
-"""
-cluster_properties = """#Log directory
-HDP_LOG_DIR={hdp_log_dir}
-
-#Data directory
-HDP_DATA_DIR={hdp_data_dir}
-
-{hdp_22_specific_props}
-
-#hosts
-NAMENODE_HOST={local_host}
-SECONDARY_NAMENODE_HOST={local_host}
-RESOURCEMANAGER_HOST={local_host}
-HIVE_SERVER_HOST={local_host}
-OOZIE_SERVER_HOST={local_host}
-WEBHCAT_HOST={local_host}
-SLAVE_HOSTS={local_host}
-ZOOKEEPER_HOSTS={local_host}
-CLIENT_HOSTS={local_host}
-HBASE_MASTER={local_host}
-HBASE_REGIONSERVERS={local_host}
-FLUME_HOSTS={local_host}
-FALCON_HOST={local_host}
-KNOX_HOST={local_host}
-STORM_NIMBUS={local_host}
-STORM_SUPERVISORS={local_host}
-
-#Database host
-DB_FLAVOR={db_flavor}
-DB_HOSTNAME={local_host}
-DB_PORT=1527
-
-#Hive properties
-HIVE_DB_NAME=hive
-HIVE_DB_USERNAME=hive
-HIVE_DB_PASSWORD=hive
-
-#Oozie properties
-OOZIE_DB_NAME=oozie
-OOZIE_DB_USERNAME=oozie
-OOZIE_DB_PASSWORD=oozie
-"""
-
-INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
- 'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
- 'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes STORM=yes FLUME=yes SLIDER=yes PHOENIX=no RANGER=no'
-CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\createservice.ps1")
-CREATE_SERVICE_CMD = 'cmd /C powershell -ExecutionPolicy Bypass -File "{script}" -username {username} -password "{password}" -servicename ' \
- '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
-INSTALL_MARKER_OK = "msi.installed"
-INSTALL_MARKER_FAILED = "msi.failed"
-_working_dir = None
-
-
-def _ensure_services_created(hadoop_user, hadoop_password):
- resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
- service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
- if not check_windows_service_exists("journalnode"):
- Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="journalnode",
- resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
- if not check_windows_service_exists("zkfc"):
- Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="zkfc",
- resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
-
-
-# creating symlinks to services folders to avoid using stack-dependent paths
-def _create_symlinks(stack_version):
- # folders
- Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
- Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
- hdp_stack_version = format_hdp_stack_version(stack_version)
- if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
- Execute("cmd /c mklink /d %HADOOP_NODE%\\knox %KNOX_HOME%")
- # files pairs (symlink_path, path_template_to_target_file), use * to replace file version
- links_pairs = [
- ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
- "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
- ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
- "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
- ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
- ]
- for link_pair in links_pairs:
- link, target = link_pair
- target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
- Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
-
-
-# check if services exists and marker file present
-def _is_msi_installed():
- return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
-
-
-# check if msi was installed correctly and raise Fail in case of broken install
-def _validate_msi_install():
- if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
- raise Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
- return _is_msi_installed()
-
-
-def _write_marker():
- if check_windows_service_exists("namenode"):
- open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
- else:
- open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
-
-
-def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_password, stack_version):
- global _working_dir
- _working_dir = save_dir
- save_dir = os.path.abspath(save_dir)
- msi_save_dir = save_dir
- # system wide lock to prevent simultaneous installations(when first task failed on timeout)
- install_lock = SystemWideLock("Global\\hdp_msi_lock")
- try:
- # try to acquire lock
- if not install_lock.lock():
- Logger.info("Some other task currently installing hdp.msi, waiting for 10 min for finish")
- if not install_lock.lock(600000):
- raise Fail("Timeout on acquiring lock")
- if _validate_msi_install():
- Logger.info("hdp.msi already installed")
- return
-
- hdp_stack_version = format_hdp_stack_version(stack_version)
- hdp_22_specific_props = ''
- if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
- hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)
-
- # MSIs cannot be larger than 2GB. HDPWIN 2.3 needed split in order to accommodate this limitation
- hdp_msi_file = ''
- for save_file in save_files:
- if save_file.lower().endswith(".msi"):
- hdp_msi_file = save_file
- file_url = urlparse.urljoin(url_base, save_file)
- try:
- download_file(file_url, os.path.join(msi_save_dir, save_file))
- except:
- raise Fail("Failed to download {url}".format(url=file_url))
-
- File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
- hdp_data_dir=hdp_data_dir,
- local_host=local_host,
- db_flavor=db_flavor,
- hdp_22_specific_props=hdp_22_specific_props))
-
- # install msi
- hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
- hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
- hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
- hadoop_password_arg = os_utils.quote_path(hadoop_password)
-
- Execute(
- INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
- hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
- reload_windows_env()
- # create additional services manually due to hdp.msi limitaitons
- _ensure_services_created(hadoop_user, hadoop_password)
- _create_symlinks(stack_version)
- # finalizing install
- _write_marker()
- _validate_msi_install()
- finally:
- install_lock.unlock()
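
The deleted module above gated its HDP 2.2-specific cluster properties and the Knox symlink on the formatted stack version; the renamed module added below keeps the same gate, expressed through the stack-neutral helpers. A minimal sketch of that gate, with the input version string purely illustrative:

from resource_management.libraries.functions.version import format_stack_version, compare_versions

stack_version_formatted = format_stack_version("2.3.0.0")  # illustrative input
# an empty string means no stack version could be derived; 2.2 or later
# turns on the HDP 2.2-specific layout properties and the Knox symlink
if stack_version_formatted != "" and compare_versions(stack_version_formatted, "2.2") >= 0:
    pass  # 2.2+ specific behavior goes here
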
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
new file mode 100644
index 0000000..f1cd9cb
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
@@ -0,0 +1,215 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import os_utils
+from ambari_commons.inet_utils import download_file
+from ambari_commons.os_windows import SystemWideLock
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+import socket
+import os
+import glob
+import urlparse
+
+
+__all__ = ['install_windows_msi']
+
+msi_save_dir = None
+hdp_log_dir = "c:\\hadoop\\logs"
+hdp_data_dir = "c:\\hadoopDefaultData"
+local_host = socket.getfqdn()
+db_flavor = "DERBY"
+hdp_22 = """#Namenode Data directory
+HDFS_NAMENODE_DATA_DIR={hdp_data_dir}\\hdpdatann
+
+#Datanode Data directory
+HDFS_DATANODE_DATA_DIR={hdp_data_dir}\\hdpdatadn
+
+IS_SLIDER=yes
+IS_PHOENIX=no
+"""
+cluster_properties = """#Log directory
+HDP_LOG_DIR={hdp_log_dir}
+
+#Data directory
+HDP_DATA_DIR={hdp_data_dir}
+
+{hdp_22_specific_props}
+
+#hosts
+NAMENODE_HOST={local_host}
+SECONDARY_NAMENODE_HOST={local_host}
+RESOURCEMANAGER_HOST={local_host}
+HIVE_SERVER_HOST={local_host}
+OOZIE_SERVER_HOST={local_host}
+WEBHCAT_HOST={local_host}
+SLAVE_HOSTS={local_host}
+ZOOKEEPER_HOSTS={local_host}
+CLIENT_HOSTS={local_host}
+HBASE_MASTER={local_host}
+HBASE_REGIONSERVERS={local_host}
+FLUME_HOSTS={local_host}
+FALCON_HOST={local_host}
+KNOX_HOST={local_host}
+STORM_NIMBUS={local_host}
+STORM_SUPERVISORS={local_host}
+
+#Database host
+DB_FLAVOR={db_flavor}
+DB_HOSTNAME={local_host}
+DB_PORT=1527
+
+#Hive properties
+HIVE_DB_NAME=hive
+HIVE_DB_USERNAME=hive
+HIVE_DB_PASSWORD=hive
+
+#Oozie properties
+OOZIE_DB_NAME=oozie
+OOZIE_DB_USERNAME=oozie
+OOZIE_DB_PASSWORD=oozie
+"""
+
+INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
+ 'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
+                  'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes FLUME=yes SLIDER=yes PHOENIX=no RANGER=no'
+CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\\createservice.ps1")
+CREATE_SERVICE_CMD = 'cmd /C powershell -ExecutionPolicy Bypass -File "{script}" -username {username} -password "{password}" -servicename ' \
+ '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
+INSTALL_MARKER_OK = "msi.installed"
+INSTALL_MARKER_FAILED = "msi.failed"
+_working_dir = None
+
+
+def _ensure_services_created(hadoop_user, hadoop_password):
+ resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
+ service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
+ if not check_windows_service_exists("journalnode"):
+ Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="journalnode",
+ resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+ if not check_windows_service_exists("zkfc"):
+ Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="zkfc",
+ resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+
+
+# create symlinks to service folders so callers avoid stack-dependent paths
+def _create_symlinks(stack_version):
+ # folders
+ Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
+ Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
+ stack_version_formatted = format_stack_version(stack_version)
+ if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
+ Execute("cmd /c mklink /d %HADOOP_NODE%\\knox %KNOX_HOME%")
+  # file pairs (symlink_path, path_template_to_target_file); * stands in for the file version
+ links_pairs = [
+ ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
+ "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
+ ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
+ "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
+ ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
+ ]
+ for link_pair in links_pairs:
+ link, target = link_pair
+ target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
+ Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
+
+
+# check that the namenode service exists and the install marker file is present
+def _is_msi_installed():
+ return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
+
+
+# check whether the MSI installed correctly; raise Fail on a broken install
+def _validate_msi_install():
+ if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
+ raise Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
+ return _is_msi_installed()
+
+
+def _write_marker():
+ if check_windows_service_exists("namenode"):
+ open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
+ else:
+ open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
+
+
+def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_password, stack_version):
+ global _working_dir
+ _working_dir = save_dir
+ save_dir = os.path.abspath(save_dir)
+ msi_save_dir = save_dir
+  # system-wide lock to prevent simultaneous installations (e.g. when the first task timed out)
+ install_lock = SystemWideLock("Global\\hdp_msi_lock")
+ try:
+ # try to acquire lock
+ if not install_lock.lock():
+      Logger.info("Another task is currently installing hdp.msi; waiting up to 10 min for it to finish")
+ if not install_lock.lock(600000):
+ raise Fail("Timeout on acquiring lock")
+ if _validate_msi_install():
+ Logger.info("hdp.msi already installed")
+ return
+
+ stack_version_formatted = format_stack_version(stack_version)
+ hdp_22_specific_props = ''
+ if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
+ hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)
+
+    # MSIs cannot be larger than 2 GB, so the HDPWIN 2.3 package had to be split to fit this limit
+ hdp_msi_file = ''
+ for save_file in save_files:
+ if save_file.lower().endswith(".msi"):
+ hdp_msi_file = save_file
+ file_url = urlparse.urljoin(url_base, save_file)
+ try:
+ download_file(file_url, os.path.join(msi_save_dir, save_file))
+      except Exception:
+ raise Fail("Failed to download {url}".format(url=file_url))
+
+ File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
+ hdp_data_dir=hdp_data_dir,
+ local_host=local_host,
+ db_flavor=db_flavor,
+ hdp_22_specific_props=hdp_22_specific_props))
+
+ # install msi
+ hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
+ hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
+ hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
+ hadoop_password_arg = os_utils.quote_path(hadoop_password)
+
+ Execute(
+ INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
+ hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
+ reload_windows_env()
+    # create additional services manually due to hdp.msi limitations
+ _ensure_services_created(hadoop_user, hadoop_password)
+ _create_symlinks(stack_version)
+ # finalizing install
+ _write_marker()
+ _validate_msi_install()
+ finally:
+ install_lock.unlock()
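
For orientation, a hypothetical call into the renamed entry point; every argument value below (repository URL, file names, user, password, version) is an illustrative placeholder, not an Ambari default:

from resource_management.libraries.functions.install_windows_msi import install_windows_msi

install_windows_msi(
  "http://repo.example.com/hdpwin/2.3/",             # url_base (placeholder)
  "C:\\var\\ambari\\msi",                            # save_dir: download and marker directory
  ["hdp-2.3.0.0.winpkg.msi", "hdp-2.3.0.0-01.cab"],  # save_files: one of these should end in .msi
  "hadoop",                                          # hadoop_user
  "S3cr3t!",                                         # hadoop_password
  "2.3")                                             # stack_version

A re-run is cheap: the OK marker file plus the namenode service check short-circuit the install, and the system-wide lock serializes concurrent attempts.
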
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
index e5e4266..4d9d8a4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
@@ -24,7 +24,7 @@ from datetime import datetime
from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.core.resources import File, Execute
from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.core.logger import Logger
from resource_management.core.source import DownloadSource
from resource_management.libraries.resources import ModifyPropertiesFile
@@ -50,8 +50,8 @@ def setup_ranger_plugin(component_select_name, service_name,
File(driver_curl_target, mode=0644)
- hdp_version = get_hdp_version(component_select_name)
- file_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install.properties')
+ stack_version = get_stack_version(component_select_name)
+ file_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install.properties')
if not os.path.isfile(file_path):
raise Fail(format('Ranger {service_name} plugin install.properties file does not exist at {file_path}'))
@@ -79,7 +79,7 @@ def setup_ranger_plugin(component_select_name, service_name,
else:
cmd = (format('disable-{service_name}-plugin.sh'),)
- cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin'), 'PATH': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin')}
+ cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin'), 'PATH': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin')}
Execute(cmd,
environment=cmd_env,
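
The rename above is easiest to follow with concrete values, so here is a minimal sketch of the path construction; the component select name and the returned version are illustrative, and format() resolves the placeholders from the caller's scope:

from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.format import format

service_name = "hive"                               # illustrative
stack_version = get_stack_version("hive-server2")   # e.g. "2.3.4.0-3485" (illustrative)
file_path = format("/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install.properties")
# -> "/usr/hdp/2.3.4.0-3485/ranger-hive-plugin/install.properties"
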
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index d6f6deb..2ccc0c6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -26,7 +26,7 @@ from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.core.resources import File, Directory, Execute
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.core.logger import Logger
from resource_management.core.source import DownloadSource, InlineTemplate
from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
@@ -44,7 +44,7 @@ def setup_ranger_plugin(component_select_name, service_name,
plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
component_list, audit_db_is_enabled, credential_file,
xa_audit_db_password, ssl_truststore_password,
- ssl_keystore_password, api_version=None, hdp_version_override = None, skip_if_rangeradmin_down = True):
+ ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True):
if audit_db_is_enabled:
File(component_downloaded_custom_connector,
@@ -59,9 +59,9 @@ def setup_ranger_plugin(component_select_name, service_name,
File(component_driver_curl_target, mode=0644)
- hdp_version = get_hdp_version(component_select_name)
- if hdp_version_override is not None:
- hdp_version = hdp_version_override
+ stack_version = get_stack_version(component_select_name)
+ if stack_version_override is not None:
+ stack_version = stack_version_override
component_conf_dir = conf_dict
@@ -135,9 +135,9 @@ def setup_ranger_plugin(component_select_name, service_name,
mode=0744)
#This should be done by rpm
- #setup_ranger_plugin_jar_symblink(hdp_version, service_name, component_list)
+ #setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
- setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, hdp_version, credential_file,
+ setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
component_user, component_group, java_home)
@@ -147,22 +147,22 @@ def setup_ranger_plugin(component_select_name, service_name,
)
-def setup_ranger_plugin_jar_symblink(hdp_version, service_name, component_list):
+def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
- jar_files = os.listdir(format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib'))
+ jar_files = os.listdir(format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib'))
for jar_file in jar_files:
for component in component_list:
- Execute(('ln','-sf',format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('/usr/hdp/current/{component}/lib/{jar_file}')),
+ Execute(('ln','-sf',format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('/usr/hdp/current/{component}/lib/{jar_file}')),
not_if=format('ls /usr/hdp/current/{component}/lib/{jar_file}'),
- only_if=format('ls /usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
+ only_if=format('ls /usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
sudo=True)
-def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, hdp_version, credential_file, xa_audit_db_password,
+def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
- cred_lib_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install/lib/*')
- cred_setup_prefix = (format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
+ cred_lib_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
+ cred_setup_prefix = (format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
if audit_db_is_enabled:
cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')