You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/11 16:26:02 UTC
[29/50] [abbrv] ambari git commit: AMBARI-21430 - Allow Multiple
Versions of Stack Tools to Co-Exist (jonathanhurley)
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
from ambari_commons.os_check import OSConst, OSCheck
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
# default user
USER_DEFAULT = 'oozie'
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
STACK_ROOT_DEFAULT = '/usr/hdp'
@@ -86,7 +88,7 @@ def get_tokens():
to build the dictionary passed into execute
"""
return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
- USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+ USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
# Configure stack root
stack_root = STACK_ROOT_DEFAULT
- if STACK_ROOT_KEY in configurations:
- stack_root = configurations[STACK_ROOT_KEY].lower()
+ if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+ stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
# oozie configuration directory using a symlink
oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
import platform
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
PERCENT_USED_WARNING_DEFAULT = 50
PERCENT_USED_CRITICAL_DEFAULT = 80
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
- return (STACK_ROOT, )
+ return (STACK_NAME, STACK_ROOT)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
if configurations is None:
return (('UNKNOWN', ['There were no configurations supplied to the script.']))
- if not STACK_ROOT in configurations:
- return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+ if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+ return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
- path = configurations[STACK_ROOT]
+ path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
try:
disk_usage = _get_disk_usage(path)
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index f8755c9..a7b65af 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
RESULT_STATE_CRITICAL = 'CRITICAL'
RESULT_STATE_UNKNOWN = 'UNKNOWN'
+STACK_NAME = '{{cluster-env/stack_name}}'
STACK_TOOLS = '{{cluster-env/stack_tools}}'
@@ -42,7 +43,7 @@ def get_tokens():
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
- return (STACK_TOOLS,)
+ return (STACK_NAME, STACK_TOOLS)
def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
if STACK_TOOLS not in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
+ stack_name = Script.get_stack_name()
+
# Of the form,
- # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+ # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
stack_tools_str = configurations[STACK_TOOLS]
if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
distro_select = "unknown-distro-select"
try:
stack_tools = json.loads(stack_tools_str)
+ stack_tools = stack_tools[stack_name]
distro_select = stack_tools["stack_selector"][0]
except:
pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
(code, out, versions) = unsafe_get_stack_versions()
if code == 0:
- msg.append("Ok. {0}".format(distro_select))
+ msg.append("{0} ".format(distro_select))
if versions is not None and type(versions) is list and len(versions) > 0:
- msg.append("Versions: {0}".format(", ".join(versions)))
+ msg.append("reported the following versions: {0}".format(", ".join(versions)))
return (RESULT_STATE_OK, ["\n".join(msg)])
else:
- msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+ msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
if out is not None:
msg.append(out)
return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
else:
- msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+ msg.append("No stack root {0} to check.".format(stack_root_dir))
return (RESULT_STATE_OK, ["\n".join(msg)])
except Exception, e:
return (RESULT_STATE_CRITICAL, [e.message])
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
index 93c7948..2d11ef3 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
@@ -234,7 +234,20 @@ gpgcheck=0</value>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
-
+
+ <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+ <property>
+ <name>stack_name</name>
+ <value>BigInsights</value>
+ <description>The name of the stack.</description>
+ <value-attributes>
+ <read-only>true</read-only>
+ <overridable>false</overridable>
+ <visible>false</visible>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
+
<!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
<property>
<name>stack_tools</name>
@@ -267,8 +280,8 @@ gpgcheck=0</value>
</property>
<property>
<name>stack_root</name>
- <value>/usr/iop</value>
- <description>Stack root folder</description>
+ <value>{"BigInsights":"/usr/iop"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
index 4627e73..a6672e4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
@@ -1,212 +1,214 @@
{
- "stack_features": [
- {
- "name": "snappy",
- "description": "Snappy compressor/decompressor support",
- "max_version": "4.0.0.0"
- },
- {
- "name": "lzo",
- "description": "LZO libraries support",
- "min_version": "4.1.0.0"
- },
- {
- "name": "express_upgrade",
- "description": "Express upgrade support",
- "min_version": "4.2.0.0"
- },
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "4.1.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "4.1.0.0"
- },
- {
- "name": "datanode_non_root",
- "description": "DataNode running as non-root support (AMBARI-7615)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "remove_ranger_hdfs_plugin_env",
- "description": "HDFS removes Ranger env files (AMBARI-14299)",
- "min_version": "4.2.0.0"
- },
- {
- "name": "ranger",
- "description": "Ranger Service support",
- "min_version": "4.2.0.0"
- },
- {
- "name": "ranger_tagsync_component",
- "description": "Ranger Tagsync component support (AMBARI-14383)",
- "min_version": "4.2.5.0"
- },
- {
- "name": "phoenix",
- "description": "Phoenix Service support",
- "min_version": "4.2.0.0"
- },
- {
- "name": "nfs",
- "description": "NFS support",
- "min_version": "4.1.0.0"
- },
- {
- "name": "timeline_state_store",
- "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
- "min_version": "4.0.0.0"
- },
- {
- "name": "copy_tarball_to_hdfs",
- "description": "Copy tarball to HDFS support (AMBARI-12113)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "spark_16plus",
- "description": "Spark 1.6+",
- "min_version": "4.2.0.0"
- },
- {
- "name": "spark_thriftserver",
- "description": "Spark Thrift Server",
- "min_version": "4.2.0.0"
- },
- {
- "name": "create_kafka_broker_id",
- "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
- "min_version": "4.0.0.0",
- "max_version": "4.2.0.0"
- },
- {
- "name": "kafka_listeners",
- "description": "Kafka listeners (AMBARI-10984)",
- "min_version": "4.2.0.0"
- },
- {
- "name": "kafka_kerberos",
- "description": "Kafka Kerberos support (AMBARI-10984)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "ranger_usersync_non_root",
- "description": "Ranger Usersync as non-root user (AMBARI-10416)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "ranger_audit_db_support",
- "description": "Ranger Audit to DB support",
- "min_version": "4.2.0.0",
- "max_version": "4.2.0.0"
- },
- {
- "name": "knox_versioned_data_dir",
- "description": "Use versioned data dir for Knox (AMBARI-13164)",
- "min_version": "4.2.0.0"
- },
- {
- "name": "knox_sso_topology",
- "description": "Knox SSO Topology support (AMBARI-13975)",
- "min_version": "4.2.0.0"
- },
- {
- "name": "oozie_admin_user",
- "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
- "min_version": "4.0.0.0"
- },
- {
- "name": "oozie_setup_shared_lib",
- "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
- "min_version": "4.0.0.0"
- },
- {
- "name": "oozie_host_kerberos",
- "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
- "min_version": "4.0.0.0",
- "max_version": "4.1.0.0"
- },
- {
- "name": "hive_metastore_upgrade_schema",
- "description": "Hive metastore upgrade schema support (AMBARI-11176)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "hive_server_interactive",
- "description": "Hive server interactive support (AMBARI-15573)",
- "min_version": "4.4.0.0"
- },
- {
- "name": "hive_webhcat_specific_configs",
- "description": "Hive webhcat specific configurations support (AMBARI-12364)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "hive_purge_table",
- "description": "Hive purge table support (AMBARI-12260)",
- "min_version": "4.1.0.0"
- },
- {
- "name": "hive_server2_kerberized_env",
- "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
- "max_version": "4.1.0.0"
- },
- {
- "name": "hive_env_heapsize",
- "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
- "min_version": "4.0.0.0"
- },
- {
- "name": "ranger_kms_hsm_support",
- "description": "Ranger KMS HSM support (AMBARI-15752)",
- "min_version": "4.2.5.0"
- },
- {
- "name": "ranger_log4j_support",
- "description": "Ranger supporting log-4j properties (AMBARI-15681)",
- "min_version": "4.2.5.0"
- },
- {
- "name": "ranger_kerberos_support",
- "description": "Ranger Kerberos support",
- "min_version": "4.2.5.0"
- },
- {
- "name": "hive_metastore_site_support",
- "description": "Hive Metastore site support",
- "min_version": "4.2.5.0"
- },
- {
- "name": "ranger_usersync_password_jceks",
- "description": "Saving Ranger Usersync credentials in jceks",
- "min_version": "4.2.5.0"
- },
- {
- "name": "ranger_install_logsearch_client",
- "description": "LogSearch Service support",
- "min_version": "4.2.5.0"
- },
- {
- "name": "hbase_home_directory",
- "description": "Hbase home directory in HDFS needed for HBASE backup",
- "min_version": "4.2.5.0"
- },
- {
- "name": "spark_livy",
- "description": "Livy as slave component of spark",
- "min_version": "4.4.0.0"
- },
- {
- "name": "ranger_pid_support",
- "description": "Ranger Service support pid generation AMBARI-16756",
- "min_version": "4.2.5.0"
- },
- {
- "name": "ranger_kms_pid_support",
- "description": "Ranger KMS Service support pid generation",
- "min_version": "4.2.5.0"
- }
- ]
+ "BigInsights": {
+ "stack_features": [
+ {
+ "name": "snappy",
+ "description": "Snappy compressor/decompressor support",
+ "max_version": "4.0.0.0"
+ },
+ {
+ "name": "lzo",
+ "description": "LZO libraries support",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "express_upgrade",
+ "description": "Express upgrade support",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "datanode_non_root",
+ "description": "DataNode running as non-root support (AMBARI-7615)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "remove_ranger_hdfs_plugin_env",
+ "description": "HDFS removes Ranger env files (AMBARI-14299)",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "ranger",
+ "description": "Ranger Service support",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "ranger_tagsync_component",
+ "description": "Ranger Tagsync component support (AMBARI-14383)",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "phoenix",
+ "description": "Phoenix Service support",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "nfs",
+ "description": "NFS support",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "timeline_state_store",
+ "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+ "min_version": "4.0.0.0"
+ },
+ {
+ "name": "copy_tarball_to_hdfs",
+ "description": "Copy tarball to HDFS support (AMBARI-12113)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "spark_16plus",
+ "description": "Spark 1.6+",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "spark_thriftserver",
+ "description": "Spark Thrift Server",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "create_kafka_broker_id",
+ "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+ "min_version": "4.0.0.0",
+ "max_version": "4.2.0.0"
+ },
+ {
+ "name": "kafka_listeners",
+ "description": "Kafka listeners (AMBARI-10984)",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "kafka_kerberos",
+ "description": "Kafka Kerberos support (AMBARI-10984)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "ranger_usersync_non_root",
+ "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "ranger_audit_db_support",
+ "description": "Ranger Audit to DB support",
+ "min_version": "4.2.0.0",
+ "max_version": "4.2.0.0"
+ },
+ {
+ "name": "knox_versioned_data_dir",
+ "description": "Use versioned data dir for Knox (AMBARI-13164)",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "knox_sso_topology",
+ "description": "Knox SSO Topology support (AMBARI-13975)",
+ "min_version": "4.2.0.0"
+ },
+ {
+ "name": "oozie_admin_user",
+ "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+ "min_version": "4.0.0.0"
+ },
+ {
+ "name": "oozie_setup_shared_lib",
+ "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+ "min_version": "4.0.0.0"
+ },
+ {
+ "name": "oozie_host_kerberos",
+ "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+ "min_version": "4.0.0.0",
+ "max_version": "4.1.0.0"
+ },
+ {
+ "name": "hive_metastore_upgrade_schema",
+ "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "hive_server_interactive",
+ "description": "Hive server interactive support (AMBARI-15573)",
+ "min_version": "4.4.0.0"
+ },
+ {
+ "name": "hive_webhcat_specific_configs",
+ "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "hive_purge_table",
+ "description": "Hive purge table support (AMBARI-12260)",
+ "min_version": "4.1.0.0"
+ },
+ {
+ "name": "hive_server2_kerberized_env",
+ "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+ "max_version": "4.1.0.0"
+ },
+ {
+ "name": "hive_env_heapsize",
+ "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+ "min_version": "4.0.0.0"
+ },
+ {
+ "name": "ranger_kms_hsm_support",
+ "description": "Ranger KMS HSM support (AMBARI-15752)",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "ranger_log4j_support",
+ "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "ranger_kerberos_support",
+ "description": "Ranger Kerberos support",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "hive_metastore_site_support",
+ "description": "Hive Metastore site support",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "ranger_usersync_password_jceks",
+ "description": "Saving Ranger Usersync credentials in jceks",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "ranger_install_logsearch_client",
+ "description": "LogSearch Service support",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "hbase_home_directory",
+ "description": "Hbase home directory in HDFS needed for HBASE backup",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "spark_livy",
+ "description": "Livy as slave component of spark",
+ "min_version": "4.4.0.0"
+ },
+ {
+ "name": "ranger_pid_support",
+ "description": "Ranger Service support pid generation AMBARI-16756",
+ "min_version": "4.2.5.0"
+ },
+ {
+ "name": "ranger_kms_pid_support",
+ "description": "Ranger KMS Service support pid generation",
+ "min_version": "4.2.5.0"
+ }
+ ]
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
index fdbbdf9..92c9349 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+ "BigInsights": {
+ "stack_selector": [
+ "iop-select",
+ "/usr/bin/iop-select",
+ "iop-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index a79e904..c6b091d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
+ <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+ <property>
+ <name>stack_name</name>
+ <value>HDP</value>
+ <description>The name of the stack.</description>
+ <value-attributes>
+ <read-only>true</read-only>
+ <overridable>false</overridable>
+ <visible>false</visible>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
<!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
<property>
<name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
</property>
<property>
<name>stack_root</name>
- <value>/usr/hdp</value>
- <description>Stack root folder</description>
+ <value>{"HDP":"/usr/hdp"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
{
- "stack_features": [
- {
- "name": "snappy",
- "description": "Snappy compressor/decompressor support",
- "min_version": "2.0.0.0",
- "max_version": "2.2.0.0"
- },
- {
- "name": "lzo",
- "description": "LZO libraries support",
- "min_version": "2.2.1.0"
- },
- {
- "name": "express_upgrade",
- "description": "Express upgrade support",
- "min_version": "2.1.0.0"
- },
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "kafka_acl_migration_support",
- "description": "ACL migration support",
- "min_version": "2.3.4.0"
- },
- {
- "name": "secure_zookeeper",
- "description": "Protect ZNodes with SASL acl in secure clusters",
- "min_version": "2.6.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "datanode_non_root",
- "description": "DataNode running as non-root support (AMBARI-7615)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "remove_ranger_hdfs_plugin_env",
- "description": "HDFS removes Ranger env files (AMBARI-14299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger",
- "description": "Ranger Service support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_tagsync_component",
- "description": "Ranger Tagsync component support (AMBARI-14383)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix",
- "description": "Phoenix Service support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "nfs",
- "description": "NFS support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "tez_for_spark",
- "description": "Tez dependency for Spark",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "timeline_state_store",
- "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "copy_tarball_to_hdfs",
- "description": "Copy tarball to HDFS support (AMBARI-12113)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "spark_16plus",
- "description": "Spark 1.6+",
- "min_version": "2.4.0.0"
- },
- {
- "name": "spark_thriftserver",
- "description": "Spark Thrift Server",
- "min_version": "2.3.2.0"
- },
- {
- "name": "storm_kerberos",
- "description": "Storm Kerberos support (AMBARI-7570)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "storm_ams",
- "description": "Storm AMS integration (AMBARI-10710)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "create_kafka_broker_id",
- "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "kafka_listeners",
- "description": "Kafka listeners (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_kerberos",
- "description": "Kafka Kerberos support (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "pig_on_tez",
- "description": "Pig on Tez support (AMBARI-7863)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_usersync_non_root",
- "description": "Ranger Usersync as non-root user (AMBARI-10416)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_audit_db_support",
- "description": "Ranger Audit to DB support",
- "min_version": "2.2.0.0",
- "max_version": "2.4.99.99"
- },
- {
- "name": "accumulo_kerberos_user_auth",
- "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "knox_versioned_data_dir",
- "description": "Use versioned data dir for Knox (AMBARI-13164)",
- "min_version": "2.3.2.0"
- },
- {
- "name": "knox_sso_topology",
- "description": "Knox SSO Topology support (AMBARI-13975)",
- "min_version": "2.3.8.0"
- },
- {
- "name": "atlas_rolling_upgrade",
- "description": "Rolling upgrade support for Atlas",
- "min_version": "2.3.0.0"
- },
- {
- "name": "oozie_admin_user",
- "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_create_hive_tez_configs",
- "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_setup_shared_lib",
- "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_host_kerberos",
- "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
- "min_version": "2.0.0.0"
- },
- {
- "name": "falcon_extensions",
- "description": "Falcon Extension",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_upgrade_schema",
- "description": "Hive metastore upgrade schema support (AMBARI-11176)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server_interactive",
- "description": "Hive server interactive support (AMBARI-15573)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_webhcat_specific_configs",
- "description": "Hive webhcat specific configurations support (AMBARI-12364)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_purge_table",
- "description": "Hive purge table support (AMBARI-12260)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server2_kerberized_env",
- "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
- "min_version": "2.2.3.0",
- "max_version": "2.2.5.0"
- },
- {
- "name": "hive_env_heapsize",
- "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_kms_hsm_support",
- "description": "Ranger KMS HSM support (AMBARI-15752)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_log4j_support",
- "description": "Ranger supporting log-4j properties (AMBARI-15681)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kerberos_support",
- "description": "Ranger Kerberos support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_site_support",
- "description": "Hive Metastore site support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_usersync_password_jceks",
- "description": "Saving Ranger Usersync credentials in jceks",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_install_infra_client",
- "description": "Ambari Infra Service support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "falcon_atlas_support_2_3",
- "description": "Falcon Atlas integration support for 2.3 stack",
- "min_version": "2.3.99.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "falcon_atlas_support",
- "description": "Falcon Atlas integration",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hbase_home_directory",
- "description": "Hbase home directory in HDFS needed for HBASE backup",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_livy",
- "description": "Livy as slave component of spark",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_livy2",
- "description": "Livy as slave component of spark",
- "min_version": "2.6.0.0"
- },
- {
- "name": "atlas_ranger_plugin_support",
- "description": "Atlas Ranger plugin support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_conf_dir_in_path",
- "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
- "min_version": "2.3.0.0",
- "max_version": "2.4.99.99"
- },
- {
- "name": "atlas_upgrade_support",
- "description": "Atlas supports express and rolling upgrades",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_hook_support",
- "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_pid_support",
- "description": "Ranger Service support pid generation AMBARI-16756",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kms_pid_support",
- "description": "Ranger KMS Service support pid generation",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_admin_password_change",
- "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_setup_db_on_start",
- "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
- "min_version": "2.6.0.0"
- },
- {
- "name": "storm_metrics_apache_classes",
- "description": "Metrics sink for Storm that uses Apache class names",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_java_opts_support",
- "description": "Allow Spark to generate java-opts file",
- "min_version": "2.2.0.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "atlas_hbase_setup",
- "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_hive_plugin_jdbc_url",
- "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "zkfc_version_advertised",
- "description": "ZKFC advertise version",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix_core_hdfs_site_required",
- "description": "HDFS and CORE site required for Phoenix",
- "max_version": "2.5.9.9"
- },
- {
- "name": "ranger_tagsync_ssl_xml_support",
- "description": "Ranger Tagsync ssl xml support.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_xml_configuration",
- "description": "Ranger code base support xml configurations",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_ranger_plugin_support",
- "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "yarn_ranger_plugin_support",
- "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_solr_config_support",
- "description": "Showing Ranger solrconfig.xml on UI",
- "min_version": "2.6.0.0"
- },
- {
- "name": "hive_interactive_atlas_hook_required",
- "description": "Registering Atlas Hook for Hive Interactive.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "core_site_for_ranger_plugins",
- "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "atlas_install_hook_package_support",
- "description": "Stop installing packages from 2.6",
- "max_version": "2.5.9.9"
- },
- {
- "name": "atlas_hdfs_site_on_namenode_ha",
- "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "hive_interactive_ga",
- "description": "Hive Interactive GA support",
- "min_version": "2.6.0.0"
- },
- {
- "name": "secure_ranger_ssl_password",
- "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_kms_ssl",
- "description": "Ranger KMS SSL properties in ambari stack",
- "min_version": "2.6.0.0"
- },
- {
- "name": "nifi_encrypt_config",
- "description": "Encrypt sensitive properties written to nifi property file",
- "min_version": "2.6.0.0"
- },
- {
- "name": "toolkit_config_update",
- "description": "Support separate input and output for toolkit configuration",
- "min_version": "2.6.0.0"
- },
- {
- "name": "admin_toolkit_support",
- "description": "Supports the nifi admin toolkit",
- "min_version": "2.6.0.0"
- },
- {
- "name": "tls_toolkit_san",
- "description": "Support subject alternative name flag",
- "min_version": "2.6.0.0"
- },
- {
- "name": "nifi_jaas_conf_create",
- "description": "Create NIFI jaas configuration when kerberos is enabled",
- "min_version": "2.6.0.0"
- }
- ]
+ "HDP": {
+ "stack_features": [
+ {
+ "name": "snappy",
+ "description": "Snappy compressor/decompressor support",
+ "min_version": "2.0.0.0",
+ "max_version": "2.2.0.0"
+ },
+ {
+ "name": "lzo",
+ "description": "LZO libraries support",
+ "min_version": "2.2.1.0"
+ },
+ {
+ "name": "express_upgrade",
+ "description": "Express upgrade support",
+ "min_version": "2.1.0.0"
+ },
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "kafka_acl_migration_support",
+ "description": "ACL migration support",
+ "min_version": "2.3.4.0"
+ },
+ {
+ "name": "secure_zookeeper",
+ "description": "Protect ZNodes with SASL acl in secure clusters",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "datanode_non_root",
+ "description": "DataNode running as non-root support (AMBARI-7615)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "remove_ranger_hdfs_plugin_env",
+ "description": "HDFS removes Ranger env files (AMBARI-14299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger",
+ "description": "Ranger Service support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_tagsync_component",
+ "description": "Ranger Tagsync component support (AMBARI-14383)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix",
+ "description": "Phoenix Service support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "nfs",
+ "description": "NFS support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "tez_for_spark",
+ "description": "Tez dependency for Spark",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "timeline_state_store",
+ "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "copy_tarball_to_hdfs",
+ "description": "Copy tarball to HDFS support (AMBARI-12113)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "spark_16plus",
+ "description": "Spark 1.6+",
+ "min_version": "2.4.0.0"
+ },
+ {
+ "name": "spark_thriftserver",
+ "description": "Spark Thrift Server",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "storm_kerberos",
+ "description": "Storm Kerberos support (AMBARI-7570)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "storm_ams",
+ "description": "Storm AMS integration (AMBARI-10710)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "create_kafka_broker_id",
+ "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_listeners",
+ "description": "Kafka listeners (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_kerberos",
+ "description": "Kafka Kerberos support (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "pig_on_tez",
+ "description": "Pig on Tez support (AMBARI-7863)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_usersync_non_root",
+ "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_audit_db_support",
+ "description": "Ranger Audit to DB support",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "accumulo_kerberos_user_auth",
+ "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "knox_versioned_data_dir",
+ "description": "Use versioned data dir for Knox (AMBARI-13164)",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "knox_sso_topology",
+ "description": "Knox SSO Topology support (AMBARI-13975)",
+ "min_version": "2.3.8.0"
+ },
+ {
+ "name": "atlas_rolling_upgrade",
+ "description": "Rolling upgrade support for Atlas",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "oozie_admin_user",
+ "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_create_hive_tez_configs",
+ "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_setup_shared_lib",
+ "description": "Oozie setup tools used to share Oozie lib to HDFS (AMBARI-7240)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_host_kerberos",
+ "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+ "min_version": "2.0.0.0"
+ },
+ {
+ "name": "falcon_extensions",
+ "description": "Falcon Extension",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_upgrade_schema",
+ "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server_interactive",
+ "description": "Hive server interactive support (AMBARI-15573)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_webhcat_specific_configs",
+ "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_purge_table",
+ "description": "Hive purge table support (AMBARI-12260)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server2_kerberized_env",
+ "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+ "min_version": "2.2.3.0",
+ "max_version": "2.2.5.0"
+ },
+ {
+ "name": "hive_env_heapsize",
+ "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_kms_hsm_support",
+ "description": "Ranger KMS HSM support (AMBARI-15752)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_log4j_support",
+ "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kerberos_support",
+ "description": "Ranger Kerberos support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_site_support",
+ "description": "Hive Metastore site support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_usersync_password_jceks",
+ "description": "Saving Ranger Usersync credentials in jceks",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_install_infra_client",
+ "description": "Ambari Infra Service support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "falcon_atlas_support_2_3",
+ "description": "Falcon Atlas integration support for 2.3 stack",
+ "min_version": "2.3.99.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "falcon_atlas_support",
+ "description": "Falcon Atlas integration",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hbase_home_directory",
+ "description": "Hbase home directory in HDFS needed for HBASE backup",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy2",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "atlas_ranger_plugin_support",
+ "description": "Atlas Ranger plugin support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_conf_dir_in_path",
+ "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+ "min_version": "2.3.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "atlas_upgrade_support",
+ "description": "Atlas supports express and rolling upgrades",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_hook_support",
+ "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_pid_support",
+ "description": "Ranger Service support pid generation AMBARI-16756",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kms_pid_support",
+ "description": "Ranger KMS Service support pid generation",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_admin_password_change",
+ "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_setup_db_on_start",
+ "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "storm_metrics_apache_classes",
+ "description": "Metrics sink for Storm that uses Apache class names",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_java_opts_support",
+ "description": "Allow Spark to generate java-opts file",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "atlas_hbase_setup",
+ "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_hive_plugin_jdbc_url",
+ "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "zkfc_version_advertised",
+ "description": "ZKFC advertise version",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix_core_hdfs_site_required",
+ "description": "HDFS and CORE site required for Phoenix",
+ "max_version": "2.5.9.9"
+ },
+ {
+ "name": "ranger_tagsync_ssl_xml_support",
+ "description": "Ranger Tagsync ssl xml support.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_xml_configuration",
+ "description": "Ranger code base support xml configurations",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_ranger_plugin_support",
+ "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "yarn_ranger_plugin_support",
+ "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_solr_config_support",
+ "description": "Showing Ranger solrconfig.xml on UI",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "hive_interactive_atlas_hook_required",
+ "description": "Registering Atlas Hook for Hive Interactive.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "core_site_for_ranger_plugins",
+ "description": "Adding core-site.xml when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "atlas_install_hook_package_support",
+ "description": "Stop installing packages from 2.6",
+ "max_version": "2.5.9.9"
+ },
+ {
+ "name": "atlas_hdfs_site_on_namenode_ha",
+ "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "hive_interactive_ga",
+ "description": "Hive Interactive GA support",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "secure_ranger_ssl_password",
+ "description": "Securing Ranger Admin and Usersync SSL and Truststore related passwords in jceks",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_kms_ssl",
+ "description": "Ranger KMS SSL properties in ambari stack",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "nifi_encrypt_config",
+ "description": "Encrypt sensitive properties written to nifi property file",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "toolkit_config_update",
+ "description": "Support separate input and output for toolkit configuration",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "admin_toolkit_support",
+ "description": "Supports the nifi admin toolkit",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "tls_toolkit_san",
+ "description": "Support subject alternative name flag",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "nifi_jaas_conf_create",
+ "description": "Create NIFI jaas configuration when kerberos is enabled",
+ "min_version": "2.6.0.0"
+ }
+ ]
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+ "HDP": {
+ "stack_selector": [
+ "hdp-select",
+ "/usr/bin/hdp-select",
+ "hdp-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
*/
-->
<configuration>
+ <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+ <property>
+ <name>stack_name</name>
+ <value>PERF</value>
+ <description>The name of the stack.</description>
+ <value-attributes>
+ <read-only>true</read-only>
+ <overridable>false</overridable>
+ <visible>false</visible>
+ </value-attributes>
+ <on-ambari-upgrade add="true"/>
+ </property>
<!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
<property>
@@ -55,8 +67,8 @@
<property>
<name>stack_root</name>
- <value>/usr/perf</value>
- <description>Stack root folder</description>
+ <value>{"PERF":"/usr/perf"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
{
- "stack_features": [
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "1.0.0.0"
- },
- {
- "name": "secure_zookeeper",
- "description": "Protect ZNodes with SASL acl in secure clusters",
- "min_version": "2.6.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "1.0.0.0"
- }
- ]
-}
+ "PERF": {
+ "stack_features": [
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "1.0.0.0"
+ },
+ {
+ "name": "secure_zookeeper",
+ "description": "Protect ZNodes with SASL acl in secure clusters",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "1.0.0.0"
+ }
+ ]
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
index 535b9d9..62562f8 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["distro-select", "/usr/bin/distro-select", "distro-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+ "PERF": {
+ "stack_selector": [
+ "distro-select",
+ "/usr/bin/distro-select",
+ "distro-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 793caf1..3562c96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -41,9 +41,11 @@ import java.util.Map;
import java.util.Set;
import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyDependencyInfo;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.topology.AdvisedConfiguration;
import org.apache.ambari.server.topology.AmbariContext;
@@ -60,6 +62,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
import org.apache.ambari.server.topology.InvalidTopologyException;
import org.apache.ambari.server.topology.TopologyRequest;
import org.apache.commons.lang.StringUtils;
+import org.easymock.EasyMock;
import org.easymock.EasyMockRule;
import org.easymock.EasyMockSupport;
import org.easymock.Mock;
@@ -83,6 +86,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(), Collections.<String, Map<String, Map<String, String>>>emptyMap());
private final Map<String, Collection<String>> serviceComponents = new HashMap<>();
+ private final Map<String, Map<String, String>> stackProperties = new HashMap<>();
+
+ private final String STACK_NAME = "testStack";
+ private final String STACK_VERSION = "1";
@Rule
public EasyMockRule mocks = new EasyMockRule(this);
@@ -102,13 +109,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
@Mock
private TopologyRequest topologyRequestMock;
+ @Mock(type = MockType.NICE)
+ private ConfigHelper configHelper;
+
@Before
public void init() throws Exception {
expect(bp.getStack()).andReturn(stack).anyTimes();
expect(bp.getName()).andReturn("test-bp").anyTimes();
- expect(stack.getName()).andReturn("testStack").anyTimes();
- expect(stack.getVersion()).andReturn("1").anyTimes();
+ expect(stack.getName()).andReturn(STACK_NAME).atLeastOnce();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).atLeastOnce();
// return false for all components since for this test we don't care about the value
expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
@@ -198,11 +208,15 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
+
+ expect(ambariContext.getConfigHelper()).andReturn(configHelper).anyTimes();
+ expect(configHelper.getDefaultStackProperties(
+ EasyMock.eq(new StackId(STACK_NAME, STACK_VERSION)))).andReturn(stackProperties).anyTimes();
}
@After
public void tearDown() {
- reset(bp, serviceInfo, stack, ambariContext);
+ reset(bp, serviceInfo, stack, ambariContext, configHelper);
}
@Test
@@ -6277,13 +6291,16 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
reset(stack);
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
-
replay(stack);
+
// WHEN
Set<String> configTypeUpdated = configProcessor.doUpdateForClusterCreate();
// THEN
@@ -6334,13 +6351,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+
reset(stack);
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
expect(stack.getConfiguration(bp.getServices())).andReturn(createStackDefaults()).anyTimes();
Set<String> emptySet = Collections.emptySet();
expect(stack.getExcludedConfigurationTypes(anyObject(String.class))).andReturn(emptySet).anyTimes();
replay(stack);
+
// WHEN
configProcessor.doUpdateForClusterCreate();
// THEN
@@ -7997,6 +8018,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
@Test
public void testValuesTrimming() throws Exception {
reset(stack);
+
+ expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
+ expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
+
Map<String, Map<String, String>> properties = new HashMap<>();
Map<String, String> hdfsSite = new HashMap<>();
@@ -8020,6 +8045,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, Collections.singleton(PropertyInfo.PropertyType.PASSWORD), null, null, null)));
propertyConfigs.put("test.host", new Stack.ConfigProperty(
new StackConfigurationResponse(null, null, null, null, "hdfs-site", null, null, null, valueAttributesInfoHost, null)));
+
expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
expect(stack.getConfigurationPropertiesWithMetadata("HDFS", "hdfs-site")).andReturn(propertyConfigs).anyTimes();
@@ -8091,7 +8117,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
throws InvalidTopologyException {
- replay(stack, serviceInfo, ambariContext);
+ replay(stack, serviceInfo, ambariContext, configHelper);
Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
Collection<String> allServices = new HashSet<>();
@@ -8154,7 +8180,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
this.name = name;
this.components = components;
this.hosts = hosts;
- this.configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+ configuration = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
Collections.<String, Map<String, Map<String, String>>>emptyMap());
}