You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by rl...@apache.org on 2017/07/19 12:38:54 UTC
[01/21] ambari git commit: AMBARI-21234. Ambari rack awareness for
Kafka. (Ambud Sharma via stoader)
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-20859 3adbbadc3 -> 903cd1a06
AMBARI-21234. Ambari rack awareness for Kafka. (Ambud Sharma via stoader)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f22256e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f22256e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f22256e7
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f22256e73af4e4cb27d3aaf47ba58a8864e37873
Parents: 63186bf
Author: Ambud Sharma <am...@hortonworks.com>
Authored: Fri Jul 14 16:04:17 2017 +0200
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Fri Jul 14 16:04:17 2017 +0200
----------------------------------------------------------------------
.../common-services/KAFKA/0.10.0.3.0/metainfo.xml | 1 +
.../KAFKA/0.10.0.3.0/package/scripts/kafka.py | 10 ++++++++++
.../KAFKA/0.10.0.3.0/package/scripts/params.py | 3 +++
.../resources/common-services/KAFKA/0.10.0/metainfo.xml | 1 +
.../KAFKA/0.8.1/package/scripts/kafka.py | 12 ++++++++++++
.../KAFKA/0.8.1/package/scripts/params.py | 3 +++
6 files changed, 30 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
index a19850e..f408ba3 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/metainfo.xml
@@ -104,6 +104,7 @@
</osSpecific>
</osSpecifics>
<restartRequiredAfterChange>true</restartRequiredAfterChange>
+ <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
</service>
</services>
</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
index 680dd32..62a9003 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/kafka.py
@@ -103,6 +103,16 @@ def kafka(upgrade_type=None):
kafka_data_dir = kafka_server_config['log.dirs']
kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+
+ rack="/default-rack"
+ i=0
+ if len(params.all_racks) > 0:
+ for host in params.all_hosts:
+ if host == params.hostname:
+ rack=params.all_racks[i]
+ break
+ i=i+1
+
Directory(kafka_data_dirs,
mode=0755,
cd_access='a',
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
index 5b0be54..4d0448f 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0.3.0/package/scripts/params.py
@@ -105,6 +105,9 @@ zookeeper_hosts.sort()
secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+
#Kafka log4j
kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
index 84b47d8..c1fcde8 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.10.0/metainfo.xml
@@ -22,6 +22,7 @@
<name>KAFKA</name>
<extends>common-services/KAFKA/0.9.0</extends>
<version>0.10.0</version>
+ <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
</service>
</services>
</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
index 680dd32..3fe1e2d 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/kafka.py
@@ -103,6 +103,18 @@ def kafka(upgrade_type=None):
kafka_data_dir = kafka_server_config['log.dirs']
kafka_data_dirs = filter(None, kafka_data_dir.split(","))
+
+ rack="/default-rack"
+ i=0
+ if len(params.all_racks) > 0:
+ for host in params.all_hosts:
+ if host == params.hostname:
+ rack=params.all_racks[i]
+ break
+ i=i+1
+
+ kafka_server_config['broker.rack']=rack
+
Directory(kafka_data_dirs,
mode=0755,
cd_access='a',
http://git-wip-us.apache.org/repos/asf/ambari/blob/f22256e7/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 9acc1ef..c7e84fc 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -105,6 +105,9 @@ zookeeper_hosts.sort()
secure_acls = default("/configurations/kafka-broker/zookeeper.set.acl", False)
kafka_security_migrator = os.path.join(kafka_home, "bin", "zookeeper-security-migration.sh")
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+
#Kafka log4j
kafka_log_maxfilesize = default('/configurations/kafka-log4j/kafka_log_maxfilesize',256)
kafka_log_maxbackupindex = default('/configurations/kafka-log4j/kafka_log_maxbackupindex',20)
[05/21] ambari git commit: AMBARI-21480. NPE during "Update Kerberos
Descriptor" (rlevas)
Posted by rl...@apache.org.
AMBARI-21480. NPE during "Update Kerberos Descriptor" (rlevas)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0a8c397b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0a8c397b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0a8c397b
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 0a8c397bd1944b8787befdff08bf6b95b9afb225
Parents: 4e1da58
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 16:42:16 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 16:42:16 2017 -0400
----------------------------------------------------------------------
.../KerberosDescriptorUpdateHelper.java | 9 ++-
.../KerberosDescriptorUpdateHelperTest.java | 70 ++++++++++++++++++++
2 files changed, 77 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/0a8c397b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
index f05b62b..dd865be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelper.java
@@ -340,6 +340,11 @@ public class KerberosDescriptorUpdateHelper {
/**
* Processes a {@link KerberosIdentityDescriptor} to change the user-supplied data based on the changes
* observed between the previous stack version's data and the new stack version's data.
+ * <p>
+ * It is expected that <code>newStackIdentities</code> and <code>userIdentities</code> are not null.
+ * However, <code>previousStackIdentities</code> may be null in the event the user added a Kerberos
+ * identity that was then added in the new Kerberos descriptor. In this case, the user's values
* for the principal name and keytab file are kept while adding any other changes from the new stack.
*
* @param previousStackIdentity a {@link KerberosIdentityDescriptor} from the previous stack version's Kerberos descriptor
* @param newStackIdentity a {@link KerberosIdentityDescriptor} from the new stack version's Kerberos descriptor
@@ -357,7 +362,7 @@ public class KerberosDescriptorUpdateHelper {
// If the new identity definition is a reference and no longer has a principal definition,
// Ignore any user changes to the old principal definition.
if (updatedValuePrincipal != null) {
- KerberosPrincipalDescriptor oldValuePrincipal = previousStackIdentity.getPrincipalDescriptor();
+ KerberosPrincipalDescriptor oldValuePrincipal = (previousStackIdentity == null) ? null : previousStackIdentity.getPrincipalDescriptor();
String previousValuePrincipalValue = null;
KerberosPrincipalDescriptor userValuePrincipal = userIdentity.getPrincipalDescriptor();
String userValuePrincipalValue = null;
@@ -380,7 +385,7 @@ public class KerberosDescriptorUpdateHelper {
// If the new identity definition is a reference and no longer has a keytab definition,
// Ignore any user changes to the old keytab definition.
if (updatedValueKeytab != null) {
- KerberosKeytabDescriptor oldValueKeytab = previousStackIdentity.getKeytabDescriptor();
+ KerberosKeytabDescriptor oldValueKeytab = (previousStackIdentity == null) ? null : previousStackIdentity.getKeytabDescriptor();
String previousValueKeytabFile = null;
KerberosKeytabDescriptor userValueKeytab = userIdentity.getKeytabDescriptor();
String userValueKeytabFile = null;
http://git-wip-us.apache.org/repos/asf/ambari/blob/0a8c397b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index 247d17e..e717190 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@ -272,6 +272,16 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
" \"keytab\": {" +
" \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
" }" +
+ " }," +
+ " {" +
+ " \"name\": \"future_identity\"," +
+ " \"principal\": {" +
+ " \"value\": \"CHANGED_future${principal_suffix}@${realm}\"," +
+ " \"type\": \"user\"" +
+ " }," +
+ " \"keytab\": {" +
+ " \"file\": \"${keytab_dir}/future.user.keytab\"" +
+ " }" +
" }" +
" ]" +
"}");
@@ -328,6 +338,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
" \"keytab\": {" +
" \"file\": \"${keytab_dir}/ambari.server.keytab\"" +
" }" +
+ " }," +
+ " {" +
+ " \"name\": \"custom_identity\"," +
+ " \"principal\": {" +
+ " \"value\": \"custom${principal_suffix}@${realm}\"," +
+ " \"type\": \"user\"" +
+ " }," +
+ " \"keytab\": {" +
+ " \"file\": \"${keytab_dir}/custom.user.keytab\"" +
+ " }" +
+ " }," +
+ " {" +
+ " \"name\": \"future_identity\"," +
+ " \"principal\": {" +
+ " \"value\": \"future${principal_suffix}@${realm}\"," +
+ " \"type\": \"user\"" +
+ " }," +
+ " \"keytab\": {" +
+ " \"file\": \"${keytab_dir}/future.user.keytab\"" +
+ " }" +
" }" +
" ]" +
"}");
@@ -343,6 +373,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
"{\n" +
" \"identities\": [\n" +
" {\n" +
+ " \"name\": \"future_identity\",\n" +
+ " \"principal\": {\n" +
+ " \"value\": \"future${principal_suffix}@${realm}\",\n" +
+ " \"type\": \"user\"\n" +
+ " },\n" +
+ " \"keytab\": {\n" +
+ " \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
+ " \"name\": \"custom_identity\",\n" +
+ " \"principal\": {\n" +
+ " \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+ " \"type\": \"user\"\n" +
+ " },\n" +
+ " \"keytab\": {\n" +
+ " \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
" \"name\": \"spnego\",\n" +
" \"principal\": {\n" +
" \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +
@@ -405,6 +455,26 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
" }\n" +
" },\n" +
" {\n" +
+ " \"name\": \"custom_identity\",\n" +
+ " \"principal\": {\n" +
+ " \"value\": \"custom${principal_suffix}@${realm}\",\n" +
+ " \"type\": \"user\"\n" +
+ " },\n" +
+ " \"keytab\": {\n" +
+ " \"file\": \"${keytab_dir}/custom.user.keytab\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
+ " \"name\": \"future_identity\",\n" +
+ " \"principal\": {\n" +
+ " \"value\": \"future${principal_suffix}@${realm}\",\n" +
+ " \"type\": \"user\"\n" +
+ " },\n" +
+ " \"keytab\": {\n" +
+ " \"file\": \"${keytab_dir}/future.user.keytab\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
" \"name\": \"spnego\",\n" +
" \"principal\": {\n" +
" \"value\": \"CHANGED_HTTP/_HOST@${realm}\",\n" +
[09/21] ambari git commit: AMBARI-21478. Wrong string quoting in
get_stack_version
Posted by rl...@apache.org.
AMBARI-21478. Wrong string quoting in get_stack_version
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/56462b22
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/56462b22
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/56462b22
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 56462b222f528fd076e64da608d67ea39dab4580
Parents: f072dd2
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Fri Jul 14 18:13:22 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Sat Jul 15 20:54:32 2017 +0200
----------------------------------------------------------------------
.../resource_management/libraries/functions/get_stack_version.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/56462b22/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
index 463d61f..49416af 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -68,7 +68,7 @@ def get_stack_version(package_name):
stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
if not os.path.exists(stack_selector_path):
- Logger.info('Skipping get_stack_version since " + stack_selector_tool + " is not yet available')
+ Logger.info('Skipping get_stack_version since ' + stack_selector_path + ' is not yet available')
return None # lazy fail
try:
@@ -77,7 +77,7 @@ def get_stack_version(package_name):
return_code, stack_output = shell.call(command, timeout=20)
except Exception, e:
Logger.error(str(e))
- raise Fail('Unable to execute " + stack_selector_path + " command to retrieve the version.')
+ raise Fail('Unable to execute ' + stack_selector_path + ' command to retrieve the version.')
if return_code != 0:
raise Fail(
[13/21] ambari git commit: AMBARI-21345 Add host doesn't fully add a
node when include/exclude files are used (dsen)
Posted by rl...@apache.org.
AMBARI-21345 Add host doesn't fully add a node when include/exclude files are used (dsen)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cc412e66
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cc412e66
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cc412e66
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: cc412e66156d5a887a725015537dcb75b0caf986
Parents: 93fe848
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jul 17 13:36:58 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jul 17 13:36:58 2017 +0300
----------------------------------------------------------------------
.../AmbariCustomCommandExecutionHelper.java | 14 ++-
.../AmbariManagementControllerImpl.java | 121 ++++++++++++++-----
.../internal/HostResourceProvider.java | 1 +
.../HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 6 +
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 8 ++
.../2.1.0.2.0/package/scripts/hdfs_namenode.py | 25 +++-
.../2.1.0.2.0/package/scripts/params_linux.py | 9 +-
.../2.1.0.2.0/package/scripts/params_windows.py | 7 ++
.../package/templates/include_hosts_list.j2 | 21 ++++
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 8 ++
.../3.0.0.3.0/package/scripts/hdfs_namenode.py | 17 ++-
.../3.0.0.3.0/package/scripts/params_linux.py | 8 +-
.../3.0.0.3.0/package/scripts/params_windows.py | 7 ++
.../package/templates/include_hosts_list.j2 | 21 ++++
.../YARN/2.1.0.2.0/configuration/yarn-site.xml | 6 +
.../2.1.0.2.0/package/scripts/params_linux.py | 12 +-
.../2.1.0.2.0/package/scripts/params_windows.py | 10 +-
.../package/scripts/resourcemanager.py | 18 ++-
.../package/templates/include_hosts_list.j2 | 21 ++++
.../YARN/3.0.0.3.0/configuration/yarn-site.xml | 6 +
.../3.0.0.3.0/package/scripts/params_linux.py | 11 +-
.../3.0.0.3.0/package/scripts/params_windows.py | 10 +-
.../package/scripts/resourcemanager.py | 18 ++-
.../package/templates/include_hosts_list.j2 | 21 ++++
.../services/HDFS/configuration/hdfs-site.xml | 6 +
.../HDFS/package/scripts/hdfs_namenode.py | 12 +-
.../0.8/services/HDFS/package/scripts/params.py | 11 +-
.../package/templates/include_hosts_list.j2 | 21 ++++
.../services/YARN/configuration/yarn-site.xml | 6 +
.../0.8/services/YARN/package/scripts/params.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/include_hosts_list.j2 | 21 ++++
.../services/YARN/configuration/yarn-site.xml | 6 +
.../services/YARN/package/scripts/params.py | 9 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/exclude_hosts_list.j2 | 21 ++++
.../package/templates/include_hosts_list.j2 | 21 ++++
.../services/YARN/configuration/yarn-site.xml | 6 +
.../services/YARN/package/scripts/params.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/include_hosts_list.j2 | 21 ++++
.../AmbariManagementControllerTest.java | 8 +-
.../python/stacks/2.0.6/HDFS/test_namenode.py | 2 +-
.../stacks/2.0.6/configs/altfs_plus_hdfs.json | 2 +-
.../python/stacks/2.0.6/configs/default.json | 2 +-
.../2.0.6/configs/default_ams_embedded.json | 2 +-
.../2.0.6/configs/default_hive_nn_ha.json | 2 +-
.../2.0.6/configs/default_hive_nn_ha_2.json | 2 +-
.../2.0.6/configs/default_hive_non_hdfs.json | 2 +-
.../2.0.6/configs/default_no_install.json | 2 +-
.../2.0.6/configs/default_oozie_mysql.json | 2 +-
.../default_update_exclude_file_only.json | 2 +-
.../2.0.6/configs/default_with_bucket.json | 2 +-
.../python/stacks/2.0.6/configs/flume_22.json | 2 +-
.../python/stacks/2.0.6/configs/flume_only.json | 2 +-
.../stacks/2.0.6/configs/hbase_no_phx.json | 2 +-
.../stacks/2.0.6/configs/hbase_with_phx.json | 2 +-
.../2.0.6/configs/oozie_existing_sqla.json | 2 +-
.../stacks/2.0.6/configs/repository_file.json | 2 +-
.../python/stacks/2.0.6/configs/secured.json | 2 +-
.../test/python/stacks/2.3/configs/ats_1_5.json | 2 +-
.../python/stacks/2.5/configs/hsi_default.json | 2 +-
.../2.5/configs/hsi_default_for_restart.json | 2 +-
.../test/python/stacks/2.5/configs/hsi_ha.json | 2 +-
.../services/YARN/configuration/yarn-site.xml | 6 +
.../YARN/package/scripts/params_linux.py | 9 +-
.../YARN/package/scripts/params_windows.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 18 ++-
.../package/templates/include_hosts_list.j2 | 21 ++++
69 files changed, 638 insertions(+), 92 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 28aa4e4..aeb5a9c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -137,7 +137,7 @@ public class AmbariCustomCommandExecutionHelper {
public final static String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
public final static String DECOM_SLAVE_COMPONENT = "slave_type";
public final static String HBASE_MARK_DRAINING_ONLY = "mark_draining_only";
- public final static String UPDATE_EXCLUDE_FILE_ONLY = "update_exclude_file_only";
+ public final static String UPDATE_FILES_ONLY = "update_files_only";
private final static String ALIGN_MAINTENANCE_STATE = "align_maintenance_state";
@@ -910,9 +910,9 @@ public class AmbariCustomCommandExecutionHelper {
@Override
public boolean shouldHostBeRemoved(final String hostname)
throws AmbariException {
- //Get UPDATE_EXCLUDE_FILE_ONLY parameter as string
+ //Get UPDATE_FILES_ONLY parameter as string
String upd_excl_file_only_str = actionExecutionContext.getParameters()
- .get(UPDATE_EXCLUDE_FILE_ONLY);
+ .get(UPDATE_FILES_ONLY);
String decom_incl_hosts_str = actionExecutionContext.getParameters()
.get(DECOM_INCLUDED_HOSTS);
@@ -986,15 +986,17 @@ public class AmbariCustomCommandExecutionHelper {
listOfExcludedHosts.add(sch.getHostName());
if (alignMtnState) {
sch.setMaintenanceState(MaintenanceState.ON);
+ LOG.info("marking Maintenance=ON on " + sch.getHostName());
}
- LOG.info("Decommissioning " + slaveCompType + " and marking Maintenance=ON on " + sch.getHostName());
+ LOG.info("Decommissioning " + slaveCompType + " on " + sch.getHostName());
}
if (filteredIncludedHosts.contains(sch.getHostName())) {
sch.setComponentAdminState(HostComponentAdminState.INSERVICE);
if (alignMtnState) {
sch.setMaintenanceState(MaintenanceState.OFF);
+ LOG.info("marking Maintenance=OFF on " + sch.getHostName());
}
- LOG.info("Recommissioning " + slaveCompType + " and marking Maintenance=OFF on " + sch.getHostName());
+ LOG.info("Recommissioning " + slaveCompType + " on " + sch.getHostName());
}
}
@@ -1048,7 +1050,7 @@ public class AmbariCustomCommandExecutionHelper {
}
if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
- commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
+ commandParams.put(UPDATE_FILES_ONLY, "false");
addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(), null);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5639dc1..433ed56 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -42,6 +42,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+import static org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom;
import java.io.File;
import java.io.FileNotFoundException;
@@ -3334,17 +3335,49 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
}
+ Map<String, String> serviceMasterForDecommissionMap = new HashMap<>();
for (Map<State, List<ServiceComponentHost>> stateScHostMap :
changedScHosts.values()) {
for (Entry<State, List<ServiceComponentHost>> entry :
stateScHostMap.entrySet()) {
State newState = entry.getKey();
for (ServiceComponentHost sch : entry.getValue()) {
+ String componentName = sch.getServiceComponentName();
+ //Create map for include/exclude files refresh
+ if (masterToSlaveMappingForDecom.containsValue(componentName) &&
+ sch.getState() == State.INIT && newState == State.INSTALLED) {
+ String serviceName = sch.getServiceName();
+ String masterComponentName = null;
+ for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+ if (entrySet.getValue().equals(componentName)) {
+ masterComponentName = entrySet.getKey();
+ }
+ }
+ try {
+ Service s = cluster.getService(serviceName);
+ //Filter services whose masters are not started
+ if (s.getServiceComponent(masterComponentName).getDesiredState() == State.STARTED) {
+ serviceMasterForDecommissionMap.put(serviceName, masterComponentName);
+ } else {
+ LOG.info(String.format("Not adding %s service from include/exclude files refresh map because it's master is not started", serviceName));
+ }
+ } catch (AmbariException e) {
+ LOG.error("Exception during INIT masters cleanup : ", e);
+ }
+ }
+
+ //actually set the new state
sch.setDesiredState(newState);
}
}
}
+ try {
+ createAndExecuteRefreshIncludeExcludeFilesActionForMasters(serviceMasterForDecommissionMap, cluster.getClusterName());
+ } catch (AmbariException e) {
+ LOG.error("Exception during refresh include exclude files action : ", e);
+ }
+
if (ignoredScHosts != null) {
for (ServiceComponentHost scHost : ignoredScHosts) {
scHost.setDesiredState(scHost.getState());
@@ -3582,18 +3615,39 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
}
+ Map<String, Map<String, String>> clusterServiceMasterForDecommissionMap = new HashMap<>();
for (Entry<ServiceComponent, Set<ServiceComponentHost>> entry : safeToRemoveSCHs.entrySet()) {
for (ServiceComponentHost componentHost : entry.getValue()) {
try {
deleteHostComponent(entry.getKey(), componentHost);
deleteStatusMetaData.addDeletedKey(componentHost.getHostName() + "/" + componentHost.getServiceComponentName());
-
+ //create cluster-master-service map to update all include/exclude files in one action
+ String componentName = componentHost.getServiceComponentName();
+ if (masterToSlaveMappingForDecom.containsValue(componentName)) {
+ String masterComponentName = null;
+ for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
+ if (entrySet.getValue().equals(componentName)) {
+ masterComponentName = entrySet.getKey();
+ }
+ }
+ if (clusterServiceMasterForDecommissionMap.containsKey(componentHost.getClusterName())) {
+ clusterServiceMasterForDecommissionMap.get(componentHost.getClusterName()).put(componentHost.getServiceName(), masterComponentName);
+ } else {
+ Map<String, String> tempMap = new HashMap<>();
+ tempMap.put(componentHost.getServiceName(), masterComponentName);
+ clusterServiceMasterForDecommissionMap.put(componentHost.getClusterName(), tempMap);
+ }
+ }
} catch (Exception ex) {
deleteStatusMetaData.addException(componentHost.getHostName() + "/" + componentHost.getServiceComponentName(), ex);
}
}
}
+ for (String cluster : clusterServiceMasterForDecommissionMap.keySet()) {
+ createAndExecuteRefreshIncludeExcludeFilesActionForMasters(clusterServiceMasterForDecommissionMap.get(cluster), cluster);
+ }
+
//Do not break behavior for existing clients where delete request contains only 1 host component.
//Response for these requests will have empty body with appropriate error code.
if (deleteStatusMetaData.getDeletedKeys().size() + deleteStatusMetaData.getExceptionForKeys().size() == 1) {
@@ -3616,7 +3670,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
private void deleteHostComponent(ServiceComponent serviceComponent, ServiceComponentHost componentHost) throws AmbariException {
- String included_hostname = componentHost.getHostName();
String serviceName = serviceComponent.getServiceName();
String master_component_name = null;
String slave_component_name = componentHost.getServiceComponentName();
@@ -3624,37 +3677,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
State slaveState = componentHost.getState();
//Delete hostcomponents
serviceComponent.deleteServiceComponentHosts(componentHost.getHostName());
- // If deleted hostcomponents support decomission and were decommited and stopped
- if (AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.containsValue(slave_component_name)
+ // If deleted hostcomponents support decommission and were decommissioned and stopped or in unknown state
+ if (masterToSlaveMappingForDecom.containsValue(slave_component_name)
&& desiredAdminState.equals(HostComponentAdminState.DECOMMISSIONED)
- && slaveState.equals(State.INSTALLED)) {
-
- for (Entry<String, String> entrySet : AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom.entrySet()) {
+ && (slaveState.equals(State.INSTALLED) || slaveState.equals(State.UNKNOWN))) {
+ for (Entry<String, String> entrySet : masterToSlaveMappingForDecom.entrySet()) {
if (entrySet.getValue().equals(slave_component_name)) {
master_component_name = entrySet.getKey();
}
}
- //Clear exclud file or draining list except HBASE
- if (!serviceName.equals(Service.Type.HBASE.toString())) {
- HashMap<String, String> requestProperties = new HashMap<>();
- requestProperties.put("context", "Remove host " +
- included_hostname + " from exclude file");
- requestProperties.put("exclusive", "true");
- HashMap<String, String> params = new HashMap<>();
- params.put("included_hosts", included_hostname);
- params.put("slave_type", slave_component_name);
- params.put(AmbariCustomCommandExecutionHelper.UPDATE_EXCLUDE_FILE_ONLY, "true");
-
- //Create filter for RECOMISSION command
- RequestResourceFilter resourceFilter
- = new RequestResourceFilter(serviceName, master_component_name, null);
- //Create request for RECOMISSION command
- ExecuteActionRequest actionRequest = new ExecuteActionRequest(
- serviceComponent.getClusterName(), AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
- Collections.singletonList(resourceFilter), null, params, true);
- //Send request
- createAction(actionRequest, requestProperties);
- }
//Mark master component as needed to restart for remove host info from components UI
Cluster cluster = clusters.getCluster(serviceComponent.getClusterName());
@@ -3695,6 +3726,40 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
}
+ /**
+ * Creates and triggers an action to update the include and exclude files for the master components, depending on the current cluster topology and component states
+ * @param serviceMasterMap
+ * @param clusterName
+ * @throws AmbariException
+ */
+ private void createAndExecuteRefreshIncludeExcludeFilesActionForMasters(Map<String, String> serviceMasterMap, String clusterName) throws AmbariException {
+ //Clear include/exclude files or draining list except HBASE
+ serviceMasterMap.remove(Service.Type.HBASE.toString());
+ //exit if empty
+ if (serviceMasterMap.isEmpty()) {
+ return;
+ }
+ LOG.debug("Refresh include/exclude files action will be executed for " + serviceMasterMap);
+ HashMap<String, String> requestProperties = new HashMap<>();
+ requestProperties.put("context", "Update Include and Exclude Files for " + serviceMasterMap.keySet().toString());
+ requestProperties.put("exclusive", "true");
+ HashMap<String, String> params = new HashMap<>();
+ params.put(AmbariCustomCommandExecutionHelper.UPDATE_FILES_ONLY, "false");
+
+ //Create filter for command
+ List<RequestResourceFilter> resourceFilters = new ArrayList<>(serviceMasterMap.size());
+ for (String serviceName : serviceMasterMap.keySet()) {
+ resourceFilters.add(new RequestResourceFilter(serviceName, serviceMasterMap.get(serviceName), null));
+ }
+
+ //Create request for command
+ ExecuteActionRequest actionRequest = new ExecuteActionRequest(
+ clusterName, AmbariCustomCommandExecutionHelper.DECOMMISSION_COMMAND_NAME, null,
+ resourceFilters, null, params, true);
+ //Send action
+ createAction(actionRequest, requestProperties);
+ }
+
@Override
public void deleteMembers(java.util.Set<MemberRequest> requests) throws AmbariException {
for (MemberRequest request : requests) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
index 4e2944f..8ef42ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
@@ -946,6 +946,7 @@ public class HostResourceProvider extends AbstractControllerResourceProvider {
throw new AmbariException(reason.toString());
}
} else {
+// TODO why can't a host with all components stopped be deleted? This functionality is implemented and only this validation blocks the request.
if (!componentsToRemove.isEmpty()) {
StringBuilder reason = new StringBuilder("Cannot remove host ")
.append(hostName)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
index aad2db0..4eab367 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -99,6 +99,12 @@
excluded.</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true, Ambari will manage the include file if dfs.hosts is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
<!--
<property>
<name>dfs.hosts</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 15fda67..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -170,6 +170,14 @@ def hdfs(component=None):
owner=params.hdfs_user,
mode="f",
)
+
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ mode="f",
+ )
+ pass
if params.service_map.has_key(component):
service_name = params.service_map[component]
ServiceConfig(service_name,
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 7226d22..cac6e9c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -108,6 +108,14 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
group=params.user_group
)
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+ pass
+
if do_format and not params.hdfs_namenode_format_disabled:
format_namenode()
pass
@@ -437,7 +445,15 @@ def decommission():
group=user_group
)
- if not params.update_exclude_file_only:
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+ pass
+
+ if not params.update_files_only:
Execute(nn_kinit_cmd,
user=hdfs_user
)
@@ -464,6 +480,13 @@ def decommission():
owner=hdfs_user
)
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user
+ )
+ pass
+
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index a9fc179..2854a00 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -163,7 +163,13 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
command_phase = default("/commandParams/phase","")
klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
@@ -172,7 +178,6 @@ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executab
hostname = config["hostname"]
public_hostname = config["public_hostname"]
rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
index 1e47c29..b3ac578 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_windows.py
@@ -43,6 +43,13 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
#decomission
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+ hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
index 15fda67..e054209 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs.py
@@ -170,6 +170,14 @@ def hdfs(component=None):
owner=params.hdfs_user,
mode="f",
)
+
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ mode="f",
+ )
+ pass
if params.service_map.has_key(component):
service_name = params.service_map[component]
ServiceConfig(service_name,
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 181b3c8..5a1f368 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -436,7 +436,15 @@ def decommission():
group=user_group
)
- if not params.update_exclude_file_only:
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+ pass
+
+ if not params.update_files_only:
Execute(nn_kinit_cmd,
user=hdfs_user
)
@@ -463,6 +471,13 @@ def decommission():
owner=hdfs_user
)
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user
+ )
+ pass
+
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
index ad49d81..1581c2a 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_linux.py
@@ -158,7 +158,13 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only", False)
command_phase = default("/commandParams/phase","")
klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
index 70d95a6..6c492d8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/params_windows.py
@@ -43,6 +43,13 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
#decomission
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+ hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
index d0d0ede..a65b801 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/configuration/yarn-site.xml
@@ -408,6 +408,12 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.http.policy</name>
<value>HTTP_ONLY</value>
<description>
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index f474a89..67931c6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -241,11 +241,17 @@ user_group = config['configurations']['cluster-env']['user_group']
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
has_ats = not len(ats_host) == 0
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
# don't using len(nm_hosts) here, because check can take too much time on large clusters
number_of_nm = 1
@@ -345,7 +351,7 @@ HdfsResource = functools.partial(
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
index 52918d2e..c2a02d7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_windows.py
@@ -59,4 +59,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only", False)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index 7d024b1..b929af0 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
mode="f"
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ mode="f"
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd, user=yarn_user)
@@ -159,7 +166,14 @@ class ResourcemanagerDefault(Resourcemanager):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ group=user_group
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 64e0bcb..2a69d35 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -402,6 +402,12 @@
<on-ambari-upgrade add="false"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.http.policy</name>
<value>HTTP_ONLY</value>
<description>
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
index a05d259..68d17f0 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
@@ -239,10 +239,17 @@ user_group = config['configurations']['cluster-env']['user_group']
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
has_ats = not len(ats_host) == 0
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
# don't using len(nm_hosts) here, because check can take too much time on large clusters
number_of_nm = 1
@@ -341,7 +348,7 @@ HdfsResource = functools.partial(
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only", False)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
index 52918d2e..c2a02d7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_windows.py
@@ -59,4 +59,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only", False)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index f6d6315..961fe63 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
mode="f"
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ mode="f"
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd, user=yarn_user)
@@ -174,7 +181,14 @@ class ResourcemanagerDefault(Resourcemanager):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ mode="f"
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
index 87684df..66d25cf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hdfs-site.xml
@@ -72,6 +72,12 @@
excluded.</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true Ambari will manage include file if dfs.hosts is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
<!--
<property>
<name>dfs.hosts</name>
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
index 6de7735..19751f6 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -143,8 +143,16 @@ def decommission():
owner=hdfs_user,
group=user_group
)
-
- if not params.update_exclude_file_only:
+
+ if params.hdfs_include_file:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=params.hdfs_user,
+ group=params.user_group
+ )
+ pass
+
+ if not params.update_files_only:
Execute(nn_kinit_cmd,
user=hdfs_user
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 19e223c..9cf163a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -55,13 +55,18 @@ falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
#hosts
hostname = config["hostname"]
rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
@@ -241,4 +246,4 @@ ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file
+script_https_protocol = Script.get_force_https_protocol_name()
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..115a8a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_include_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index 8e9b8b1..3cb5add 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true Ambari will manage include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.timeline-service.enabled</name>
<value>true</value>
<description>Indicate to clients whether timeline service is enabled or not.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
index 33496cfe..87b5992 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/params.py
@@ -118,6 +118,14 @@ user_group = config['configurations']['cluster-env']['user_group']
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
hostname = config['hostname']
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
@@ -162,7 +170,7 @@ HdfsDirectory = functools.partial(
kinit_path_local = kinit_path_local,
bin_dir = hadoop_bin_dir
)
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
index 4d40d68..8bd76bf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
@@ -89,7 +89,14 @@ class Resourcemanager(Script):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ group=user_group
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true Ambari will manage include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.timeline-service.enabled</name>
<value>true</value>
<description>Indicate to clients whether timeline service is enabled or not.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..16db0e4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,6 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
#exclude file
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
hostname = config['hostname']
@@ -128,7 +135,7 @@ HdfsDirectory = functools.partial(
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local
)
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
hadoop_bin = "/usr/lib/hadoop/sbin"
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ group=user_group
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
user=yarn_user)
pass
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
index 93a35cd..f2da835 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -334,6 +334,12 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true Ambari will manage include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.timeline-service.enabled</name>
<value>true</value>
<description>Indicate to clients whether timeline service is enabled or not.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
index 5a7e508..bd188e9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/params.py
@@ -98,7 +98,13 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
#exclude file
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
-
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
hostname = config['hostname']
if security_enabled:
@@ -128,7 +134,7 @@ HdfsDirectory = functools.partial(
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local
)
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
hadoop_bin = "/usr/lib/hadoop/sbin"
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
index a286ae3..f92938b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/resourcemanager.py
@@ -79,7 +79,14 @@ class Resourcemanager(Script):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ group=user_group
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
user=yarn_user)
pass
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b3a12f2..fdfca0f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -6028,8 +6028,8 @@ public class AmbariManagementControllerTest {
execCmd = storedTasks.get(0).getExecutionCommandWrapper
().getExecutionCommand();
Map<String, String> cmdParams = execCmd.getCommandParams();
- Assert.assertTrue(cmdParams.containsKey("update_exclude_file_only"));
- Assert.assertTrue(cmdParams.get("update_exclude_file_only").equals("false"));
+ Assert.assertTrue(cmdParams.containsKey("update_files_only"));
+ Assert.assertTrue(cmdParams.get("update_files_only").equals("false"));
Assert.assertNotNull(storedTasks);
Assert.assertEquals(1, storedTasks.size());
Assert.assertEquals(HostComponentAdminState.DECOMMISSIONED, scHost.getComponentAdminState());
@@ -6084,8 +6084,8 @@ public class AmbariManagementControllerTest {
Assert.assertTrue(hrc.getCommandDetail().contains(host1));
Assert.assertTrue(hrc.getCommandDetail().contains(host2));
cmdParams = hrc.getExecutionCommandWrapper().getExecutionCommand().getCommandParams();
- if(!cmdParams.containsKey("update_exclude_file_only")
- || !cmdParams.get("update_exclude_file_only").equals("true")) {
+ if(!cmdParams.containsKey("update_files_only")
+ || !cmdParams.get("update_files_only").equals("true")) {
countRefresh++;
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index a7233c7..f0f2587 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1041,7 +1041,7 @@ class TestNamenode(RMFTestCase):
bin_dir = '/usr/bin')
self.assertNoMoreResources()
- def test_decommission_update_exclude_file_only(self):
+ def test_decommission_update_files_only(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
classname = "NameNode",
command = "decommission",
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index ea00a37..f928073 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -33,7 +33,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index d267bc1..a16ec26 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -60,7 +60,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
index 71423c8..f3e8dc3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_ams_embedded.json
@@ -32,7 +32,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 009ff6d..7b0f78d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -35,7 +35,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 2b078c3..01f0efc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -35,7 +35,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index 571b737..0cbd322 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -33,7 +33,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 7fdb449..cfcf5e1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -35,7 +35,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index 7378b68..7db73ab 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -36,7 +36,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
[17/21] ambari git commit: AMBARI-21504. Restart of MR2 History
Server failed due to null in immutable_paths.(vbrodetskyi)
Posted by rl...@apache.org.
AMBARI-21504. Restart of MR2 History Server failed due to null in immutable_paths.(vbrodetskyi)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/274a9951
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/274a9951
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/274a9951
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 274a9951c34721a867e8e541b4fa73bdf03aa5d0
Parents: f450eba
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jul 18 17:14:10 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jul 18 17:14:10 2017 +0300
----------------------------------------------------------------------
.../AmbariCustomCommandExecutionHelper.java | 7 +++++--
.../AmbariManagementControllerImpl.java | 3 ++-
.../internal/ClientConfigResourceProvider.java | 3 ++-
.../ambari/server/state/ConfigHelper.java | 17 +++++++++++++++
.../HDFS/2.1.0.2.0/package/scripts/namenode.py | 4 ++++
.../YARN/2.1.0.2.0/package/scripts/service.py | 4 ++++
.../AmbariManagementControllerImplTest.java | 12 ++++++++++-
.../ambari/server/state/ConfigHelperTest.java | 22 ++++++++++++++++++++
8 files changed, 67 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index aeb5a9c..5180870 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -88,6 +88,7 @@ import org.apache.ambari.server.state.Host;
import org.apache.ambari.server.state.HostComponentAdminState;
import org.apache.ambari.server.state.HostState;
import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.PropertyInfo.PropertyType;
import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.Service;
@@ -431,7 +432,8 @@ public class AmbariCustomCommandExecutionHelper {
String groupList = gson.toJson(groupSet);
hostLevelParams.put(GROUP_LIST, groupList);
- Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+ Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+ Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
@@ -1494,7 +1496,8 @@ public class AmbariCustomCommandExecutionHelper {
hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
- Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+ Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
+ Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 433ed56..38842fa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2522,7 +2522,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
String groupList = gson.toJson(groupSet);
hostParams.put(GROUP_LIST, groupList);
- Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties);
+ Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties);
+ Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
hostParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 15c2d81..166fc5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -407,7 +407,8 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
String groupList = gson.toJson(groupSet);
hostLevelParams.put(GROUP_LIST, groupList);
- Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredClusterConfigs);
+ Map<org.apache.ambari.server.state.PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId, PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredClusterConfigs);
+ Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index a3a676d..2a70ee1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -220,6 +220,23 @@ public class ConfigHelper {
return resolved;
}
+
+ public Set<String> filterInvalidPropertyValues(Map<PropertyInfo, String> properties, String filteredListName) {
+ Set<String> resultSet = new HashSet<>();
+ for (Iterator<Entry<PropertyInfo, String>> iterator = properties.entrySet().iterator(); iterator.hasNext();) {
+ Entry<PropertyInfo, String> property = iterator.next();
+ PropertyInfo propertyInfo = property.getKey();
+ String propertyValue = property.getValue();
+ if (property == null || propertyValue == null || propertyValue.toLowerCase().equals("null") || propertyValue.isEmpty()) {
+ LOG.error(String.format("Excluding property %s from %s, because of invalid or empty value!", propertyInfo.getName(), filteredListName));
+ iterator.remove();
+ } else {
+ resultSet.add(propertyValue);
+ }
+ }
+ return resultSet;
+ }
+
/**
* Get all config properties for a cluster given a set of configType to
* versionTags map. This helper method merges all the override tags with a
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index c554349..897e6cb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -96,6 +96,10 @@ class NameNode(Script):
env.set_params(params)
self.configure(env)
hdfs_binary = self.get_hdfs_binary()
+
+ if not params.hdfs_tmp_dir or params.hdfs_tmp_dir == None or params.hdfs_tmp_dir.lower() == 'null':
+ Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. Ambari will change permissions for the folder on regular basis.")
+
namenode(action="start", hdfs_binary=hdfs_binary, upgrade_type=upgrade_type,
upgrade_suspended=params.upgrade_suspended, env=env)
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
index 1c1b11b..7c59b60 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service.py
@@ -26,6 +26,7 @@ from resource_management.libraries.functions.show_logs import show_logs
from resource_management.libraries.functions.format import format
from resource_management.core.resources.system import Execute, File
from resource_management.core.signal_utils import TerminateStrategy
+from resource_management.core.logger import Logger
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def service(componentName, action='start', serviceName='yarn'):
@@ -43,6 +44,9 @@ def service(componentName, action='start', serviceName='yarn'):
import params
if serviceName == 'mapreduce' and componentName == 'historyserver':
+ if not params.hdfs_tmp_dir or params.hdfs_tmp_dir == None or params.hdfs_tmp_dir.lower() == 'null':
+ Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. Ambari will change permissions for the folder on regular basis.")
+
delete_pid_file = True
daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index eadc678..1f2c332 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2059,6 +2059,14 @@ public class AmbariManagementControllerImplTest {
String JCE_NAME = "jceName";
String OJDBC_JAR_NAME = "OjdbcJarName";
String SERVER_DB_NAME = "ServerDBName";
+ Map<PropertyInfo, String> notManagedHdfsPathMap = new HashMap<>();
+ PropertyInfo propertyInfo1 = new PropertyInfo();
+ propertyInfo1.setName("1");
+ PropertyInfo propertyInfo2 = new PropertyInfo();
+ propertyInfo2.setName("2");
+ notManagedHdfsPathMap.put(propertyInfo1, "/tmp");
+ notManagedHdfsPathMap.put(propertyInfo2, "/apps/falcon");
+
Set<String> notManagedHdfsPathSet = new HashSet<>(Arrays.asList("/tmp", "/apps/falcon"));
Gson gson = new Gson();
@@ -2089,8 +2097,10 @@ public class AmbariManagementControllerImplTest {
expect(configuration.getPreviousDatabaseConnectorNames()).andReturn(new HashMap<String, String>()).anyTimes();
expect(repositoryVersionEntity.getVersion()).andReturn("1234").anyTimes();
expect(repositoryVersionEntity.getStackId()).andReturn(stackId).anyTimes();
- expect(configHelper.getPropertyValuesWithPropertyType(stackId,
+ expect(configHelper.getPropertiesWithPropertyType(stackId,
PropertyInfo.PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs)).andReturn(
+ notManagedHdfsPathMap);
+ expect(configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST)).andReturn(
notManagedHdfsPathSet);
replay(manager, clusters, cluster, injector, stackId, configuration, repositoryVersionEntity, configHelper);
http://git-wip-us.apache.org/repos/asf/ambari/blob/274a9951/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index 857da61..6e84b33 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -764,6 +764,28 @@ public class ConfigHelperTest {
}
@Test
+ public void testFilterInvalidPropertyValues() {
+ Map<PropertyInfo, String> properties = new HashMap<>();
+ PropertyInfo prop1 = new PropertyInfo();
+ prop1.setName("1");
+ PropertyInfo prop2 = new PropertyInfo();
+ prop1.setName("2");
+ PropertyInfo prop3 = new PropertyInfo();
+ prop1.setName("3");
+ PropertyInfo prop4 = new PropertyInfo();
+ prop1.setName("4");
+
+ properties.put(prop1, "/tmp");
+ properties.put(prop2, "null");
+ properties.put(prop3, "");
+ properties.put(prop4, null);
+
+ Set<String> resultSet = configHelper.filterInvalidPropertyValues(properties, "testlist");
+ Assert.assertEquals(1, resultSet.size());
+ Assert.assertEquals(resultSet.iterator().next(), "/tmp");
+ }
+
+ @Test
public void testMergeAttributesWithNullProperties() throws Exception {
Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
Map<String, String> persistedFinalAttrs = new HashMap<>();
[18/21] ambari git commit: AMBARI-21510. Convert calculated value for
'hive.server2.tez.sessions.per.default.queue' to long before setting it.
Posted by rl...@apache.org.
AMBARI-21510. Convert calculated value for 'hive.server2.tez.sessions.per.default.queue' to long before setting it.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/016df4e9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/016df4e9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/016df4e9
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 016df4e9f795ff3b05e27dd49d8e81bd4e9dc28e
Parents: 274a995
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 18 12:48:53 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Tue Jul 18 15:24:23 2017 -0700
----------------------------------------------------------------------
.../common-services/YARN/3.0.0.3.0/service_advisor.py | 7 +++----
.../main/resources/stacks/HDP/2.5/services/stack_advisor.py | 7 +++----
.../src/test/python/stacks/2.5/common/test_stack_advisor.py | 4 ++--
3 files changed, 8 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 0fb538d..74e0510 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -1008,11 +1008,10 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
if not llap_concurrency_in_changed_configs:
min_llap_concurrency = 1
- putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
- putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
- min_llap_concurrency)
+ putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', long(llap_concurrency))
+ putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", min_llap_concurrency)
- putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", max_llap_concurreny)
+ putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", long(max_llap_concurreny))
num_llap_nodes = long(num_llap_nodes)
putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", min_nodes_required)
http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 1c19d8b..92b7367 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1252,11 +1252,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
if not llap_concurrency_in_changed_configs:
min_llap_concurrency = 1
- putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
- putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
- min_llap_concurrency)
+ putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', long(llap_concurrency))
+ putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", min_llap_concurrency)
- putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", max_llap_concurreny)
+ putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", long(max_llap_concurreny))
num_llap_nodes = long(num_llap_nodes)
http://git-wip-us.apache.org/repos/asf/ambari/blob/016df4e9/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index bf0cbec..fc5f220 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -1561,7 +1561,7 @@ class TestHDP25StackAdvisor(TestCase):
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
- self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '3.0'})
+ self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '3'})
self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes_for_llap_daemons'], 3)
@@ -2773,7 +2773,7 @@ class TestHDP25StackAdvisor(TestCase):
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, self.hosts)
self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=2.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=2.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.ordering-policy=priority-utilization\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.priority=10\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=98.0\nyarn.scheduler.capacity.root.llap.capacity=98.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
- self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1.0')
+ self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '4', 'minimum': '1'})
self.assertTrue('num_llap_nodes_for_llap_daemons' not in configurations['hive-interactive-env']['properties'])
[10/21] ambari git commit: AMBARI-21483. Add UID/GID related
enhancements (echekanskiy)
Posted by rl...@apache.org.
AMBARI-21483. Add UID/GID related enhancements (echekanskiy)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f92d1219
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f92d1219
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f92d1219
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f92d12193b30d53dc06ab9642ef4b9d61b5bac1c
Parents: 56462b2
Author: Eugene Chekanskiy <ec...@hortonworks.com>
Authored: Sun Jul 16 20:22:34 2017 +0300
Committer: Eugene Chekanskiy <ec...@hortonworks.com>
Committed: Sun Jul 16 20:22:34 2017 +0300
----------------------------------------------------------------------
.../ambari/server/state/PropertyInfo.java | 2 +
.../hooks/before-ANY/files/changeToSecureUid.sh | 13 +-
.../before-ANY/scripts/shared_initialization.py | 45 ++-
.../2.0.6/hooks/before-ANY/test_before_any.py | 294 +++++++++++--------
.../app/controllers/wizard/step7_controller.js | 67 +++++
.../configs/stack_config_properties_mapper.js | 14 +-
ambari-web/app/styles/application.less | 15 +
...ontrols_service_config_usergroup_with_id.hbs | 27 ++
ambari-web/app/utils/config.js | 3 +
.../configs/service_configs_by_category_view.js | 6 +
ambari-web/app/views/common/controls_view.js | 39 +++
11 files changed, 392 insertions(+), 133 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 62396e3..63c850e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -281,7 +281,9 @@ public class PropertyInfo {
public enum PropertyType {
PASSWORD,
USER,
+ UID,
GROUP,
+ GID,
TEXT,
ADDITIONAL_USER_PROPERTY,
NOT_MANAGED_HDFS_PATH,
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
index 08542c4..4663f10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
@@ -21,6 +21,7 @@
username=$1
directories=$2
+newUid=$3
function find_available_uid() {
for ((i=1001; i<=2000; i++))
@@ -34,7 +35,16 @@ function find_available_uid() {
done
}
-find_available_uid
+if [ -z $2 ]; then
+ test $(id -u ${username} 2>/dev/null)
+ if [ $? -ne 1 ]; then
+ newUid=`id -u ${username}`
+ else
+ find_available_uid
+ fi
+ echo $newUid
+ exit 0
+fi
if [ $newUid -eq 0 ]
then
@@ -43,7 +53,6 @@ then
fi
set -e
-
dir_array=($(echo $directories | sed 's/,/\n/g'))
old_uid=$(id -u $username)
sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 39f5a47..bcc1a3a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -24,6 +24,7 @@ import tempfile
from copy import copy
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
+from resource_management.core import shell
def setup_users():
"""
@@ -43,11 +44,17 @@ def setup_users():
)
for user in params.user_list:
- User(user,
- gid = params.user_to_gid_dict[user],
- groups = params.user_to_groups_dict[user],
- fetch_nonlocal_groups = params.fetch_nonlocal_groups
- )
+ if params.override_uid == "true":
+ User(user,
+ uid = get_uid(user),
+ gid = params.user_to_gid_dict[user],
+ groups = params.user_to_groups_dict[user],
+ )
+ else:
+ User(user,
+ gid = params.user_to_gid_dict[user],
+ groups = params.user_to_groups_dict[user],
+ )
if params.override_uid == "true":
set_uid(params.smoke_user, params.smoke_user_dirs)
@@ -65,6 +72,7 @@ def setup_users():
create_parents = True,
cd_access="a",
)
+
if params.override_uid == "true":
set_uid(params.hbase_user, params.hbase_user_dirs)
else:
@@ -125,7 +133,7 @@ def create_users_and_groups(user_and_groups):
Group(copy(groups_list),
)
return groups_list
-
+
def set_uid(user, user_dirs):
"""
user_dirs - comma separated directories
@@ -136,9 +144,30 @@ def set_uid(user, user_dirs):
content=StaticFile("changeToSecureUid.sh"),
mode=0555)
ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
- Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+ uid = get_uid(user)
+ Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-
+
+def get_uid(user):
+ import params
+ user_str = str(user) + "_uid"
+ service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+ if service_env and params.config['configurations'][service_env[0]][user_str]:
+ service_env_str = str(service_env[0])
+ uid = params.config['configurations'][service_env_str][user_str]
+ if len(service_env) > 1:
+ Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
+ return uid
+ else:
+ if user == params.smoke_user:
+ return 0
+ File(format("{tmp_dir}/changeUid.sh"),
+ content=StaticFile("changeToSecureUid.sh"),
+ mode=0555)
+ code, newUid = shell.call((format("{tmp_dir}/changeUid.sh"), format("{user}")), sudo=True)
+ return newUid
+
def setup_hadoop_env():
import params
stackversion = params.stack_version_unformatted
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 75c6543..1d2351f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -21,6 +21,7 @@ limitations under the License.
from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, call, patch
from resource_management import Hook
+import itertools
import getpass
import os
@@ -45,147 +46,201 @@ class TestHookBeforeInstall(RMFTestCase):
self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
classname="BeforeAnyHook",
command="hook",
- config_file="default.json"
- )
-
- self.assertResourceCalled('Group', 'hadoop',
- )
- self.assertResourceCalled('Group', 'nobody',
- )
- self.assertResourceCalled('Group', 'users',
+ config_file="default.json",
+ call_mocks=itertools.cycle([(0, "1000")])
)
+ self.assertResourceCalled('Group', 'hadoop',)
+ self.assertResourceCalled('Group', 'nobody',)
+ self.assertResourceCalled('Group', 'users',)
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'hive',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'oozie',
- gid = 'hadoop',
- groups = [u'users'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'users'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'nobody',
- gid = 'hadoop',
- groups = [u'nobody'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'nobody'],
+ )
self.assertResourceCalled('User', 'ambari-qa',
- gid = 'hadoop',
- groups = [u'users'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = 0,
+ groups = [u'users'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'flume',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'hdfs',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'storm',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'mapred',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'hbase',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'tez',
- gid = 'hadoop',
- groups = [u'users'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'users'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'zookeeper',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'falcon',
- gid = 'hadoop',
- groups = [u'users'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'users'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'sqoop',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'yarn',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
self.assertResourceCalled('User', 'hcat',
- gid = 'hadoop',
- groups = [u'hadoop'],
- fetch_nonlocal_groups = True,
- )
+ gid = 'hadoop',
+ uid = '1000',
+ groups = [u'hadoop'],
+ )
self.assertResourceCalled('File', '/tmp/changeUid.sh',
- content = StaticFile('changeToSecureUid.sh'),
- mode = 0555,
- )
- self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa',
- not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
- )
- self.assertResourceCalled('Directory', self.TMP_PATH,
- owner = 'hbase',
- mode = 0775,
- create_parents = True,
- cd_access='a'
- )
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
+ self.assertResourceCalled('Execute', '/tmp/changeUid.sh ambari-qa /tmp/hadoop-ambari-qa,/tmp/hsperfdata_ambari-qa,/home/ambari-qa,/tmp/ambari-qa,/tmp/sqoop-ambari-qa 0',
+ not_if = '(test $(id -u ambari-qa) -gt 1000) || (false)',
+ )
+ self.assertResourceCalled('Directory', '/tmp/hbase-hbase',
+ owner = 'hbase',
+ create_parents = True,
+ mode = 0775,
+ cd_access = 'a',
+ )
self.assertResourceCalled('File', '/tmp/changeUid.sh',
- content = StaticFile('changeToSecureUid.sh'),
- mode = 0555,
- )
- self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,' + self.TMP_PATH,
- not_if = '(test $(id -u hbase) -gt 1000) || (false)',
- )
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
+ self.assertResourceCalled('File', '/tmp/changeUid.sh',
+ content = StaticFile('changeToSecureUid.sh'),
+ mode = 0555,
+ )
+ self.assertResourceCalled('Execute', '/tmp/changeUid.sh hbase /home/hbase,/tmp/hbase,/usr/bin/hbase,/var/log/hbase,/tmp/hbase-hbase 1000',
+ not_if = '(test $(id -u hbase) -gt 1000) || (false)',
+ )
self.assertResourceCalled('User', 'test_user1',
- fetch_nonlocal_groups = True,
- )
+ fetch_nonlocal_groups = True,
+ )
self.assertResourceCalled('User', 'test_user2',
- fetch_nonlocal_groups = True,
- )
- self.assertResourceCalled('Group', 'hdfs',
- )
- self.assertResourceCalled('Group', 'test_group',
- )
+ fetch_nonlocal_groups = True,
+ )
+ self.assertResourceCalled('Group', 'hdfs',)
+ self.assertResourceCalled('Group', 'test_group',)
self.assertResourceCalled('User', 'hdfs',
- groups = [u'hadoop', u'hdfs', u'test_group'],
- fetch_nonlocal_groups = True,
- )
+ fetch_nonlocal_groups = True,
+ groups = [u'hadoop', u'hdfs', u'test_group'],
+ )
self.assertResourceCalled('Directory', '/etc/hadoop',
- mode = 0755
- )
+ mode = 0755,
+ )
self.assertResourceCalled('Directory', '/etc/hadoop/conf.empty',
- owner = 'root',
- group = 'hadoop',
- create_parents = True,
- )
+ owner = 'root',
+ create_parents = True,
+ group = 'hadoop',
+ )
self.assertResourceCalled('Link', '/etc/hadoop/conf',
- not_if = 'ls /etc/hadoop/conf',
- to = '/etc/hadoop/conf.empty',
- )
+ not_if = 'ls /etc/hadoop/conf',
+ to = '/etc/hadoop/conf.empty',
+ )
self.assertResourceCalled('File', '/etc/hadoop/conf/hadoop-env.sh',
- content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
- owner = 'hdfs',
- group = 'hadoop'
+ content = InlineTemplate(self.getConfig()['configurations']['hadoop-env']['content']),
+ owner = 'hdfs',
+ group = 'hadoop'
)
self.assertResourceCalled('Directory', '/tmp/hadoop_java_io_tmpdir',
owner = 'hdfs',
group = 'hadoop',
- mode = 01777
- )
-
+ mode = 01777,
+ )
self.assertResourceCalled('Directory', '/tmp/AMBARI-artifacts/',
create_parents = True,
)
@@ -198,20 +253,17 @@ class TestHookBeforeInstall(RMFTestCase):
)
self.assertResourceCalled('Directory', '/usr/jdk64',)
self.assertResourceCalled('Execute', ('chmod', 'a+x', u'/usr/jdk64'),
- sudo = True
- )
- self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64'
+ sudo = True,
)
+ self.assertResourceCalled('Execute', 'cd /tmp/jdk_tmp_dir && tar -xf /tmp/jdk-7u67-linux-x64.tar.gz && ambari-sudo.sh cp -rp /tmp/jdk_tmp_dir/* /usr/jdk64',)
self.assertResourceCalled('Directory', '/tmp/jdk_tmp_dir',
- action = ['delete']
+ action = ['delete'],
)
-
self.assertResourceCalled('File', '/usr/jdk64/jdk1.7.0_45/bin/java',
mode = 0755,
- cd_access = "a",
+ cd_access = 'a',
)
self.assertResourceCalled('Execute', ('chmod', '-R', '755', u'/usr/jdk64/jdk1.7.0_45'),
- sudo = True,
- )
-
+ sudo = True,
+ )
self.assertNoMoreResources()
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 9a897d0..6a90c26 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -531,6 +531,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
}
var stepConfigs = this.createStepConfigs();
var serviceConfigs = this.renderConfigs(stepConfigs, configs);
+ this.addUidAndGidRepresentations(serviceConfigs);
// if HA is enabled -> Make some reconfigurations
if (this.get('wizardController.name') === 'addServiceController') {
this.updateComponentActionConfigs(configs, serviceConfigs);
@@ -802,6 +803,38 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
},
/**
+ * Set the uid property for user properties. The uid is later used to help map the user and uid values in adjacent columns
+ * @param {object} miscSvc
+ * @param {string} svcName
+ * @private
+ */
+ _setUID: function (miscSvc, svcName) {
+ var user = miscSvc.configs.findProperty('name', svcName + '_user');
+ if (user) {
+ var uid = miscSvc.configs.findProperty('name', user.value + '_uid');
+ if (uid) {
+ user.set('ugid', uid);
+ }
+ }
+ },
+
+ /**
+ * Set the gid property for group properties. The gid is later used to help map the group and gid values in adjacent columns
+ * @param {object} miscSvc
+ * @param {string} svcName
+ * @private
+ */
+ _setGID: function (miscSvc, svcName) {
+ var group = miscSvc.configs.findProperty('name', svcName + '_group');
+ if (group) {
+ var gid = miscSvc.configs.findProperty('name', group.value + '_gid');
+ if (gid) {
+ group.set('ugid', gid);
+ }
+ }
+ },
+
+ /**
* render configs, distribute them by service
* and wrap each in ServiceConfigProperty object
* @param stepConfigs
@@ -841,6 +874,11 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
this.updateHostOverrides(serviceConfigProperty, _config);
if (this.get('wizardController.name') === 'addServiceController') {
this._updateIsEditableFlagForConfig(serviceConfigProperty, true);
+ //since the override_uid and ignore_groupsusers_create changes are not saved to the database post install, they should be editable only
+ //during initial cluster installation
+ if (['override_uid', 'ignore_groupsusers_create'].contains(serviceConfigProperty.get('name'))) {
+ serviceConfigProperty.set('isEditable', false);
+ }
}
if (!this.get('content.serviceConfigProperties.length') && !serviceConfigProperty.get('hasInitialValue')) {
App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, dependencies);
@@ -860,6 +898,35 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
return stepConfigs;
},
+ addUidAndGidRepresentations: function(serviceConfigs) {
+ //map the uids to the corresponding users
+ var miscSvc = serviceConfigs.findProperty('serviceName', 'MISC');
+ if (miscSvc) {
+ //iterate through the list of users and groups and assign the uid/gid accordingly
+ //user properties are servicename_user
+ //uid properties are value of servicename_user + _uid
+ //group properties are servicename_group
+ //gid properties are value of servicename_group + _gid
+ //we will map the users/uids and groups/gids based on this assumption
+ this.get('selectedServiceNames').forEach(function (serviceName) {
+ this._setUID(miscSvc, serviceName.toLowerCase());
+ this._setGID(miscSvc, serviceName.toLowerCase());
+ }, this);
+
+ //for zookeeper, the user property name does not follow the convention that users for other services do. i.e. the user property name is not servicename_user as is the case with other services
+ //the user property name is zk_user and not zookeeper_user, hence set the uid for zk_user separately
+ this._setUID(miscSvc, 'zk');
+ //the user property name is mapred_user and not mapreduce2_user for mapreduce2 service, hence set the uid for mapred_user separately
+ this._setUID(miscSvc, 'mapred');
+ //for hadoop, the group property name does not follow the convention that groups for other services do. i.e. the group property name is not servicename_group as is the case with other services
+ //the group property name is user_group and not hadoop_group, hence set the gid for user_group separately
+ this._setGID(miscSvc, 'user');
+
+ // uid/gid properties are displayed in a separate column, hence prevent the properties from showing up on a separate line
+ miscSvc.configs.filterProperty('displayType', 'uid_gid').setEach('isVisible', false);
+ }
+ },
+
/**
* Add host name properties to appropriate categories (for installer and add service)
*
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
index 9b4b920..75a5564 100644
--- a/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
+++ b/ambari-web/app/mappers/configs/stack_config_properties_mapper.js
@@ -197,9 +197,14 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
* @param config
*/
handleSpecialProperties: function(config) {
- if (!config.StackConfigurations.property_type.contains('ADDITIONAL_USER_PROPERTY')) {
+ var types = config.StackConfigurations.property_type;
+ if (!types.contains('ADDITIONAL_USER_PROPERTY')) {
config.index = App.StackService.displayOrder.indexOf(config.StackConfigurations.service_name) + 1 || 30;
}
+ // displayType from stack ignored, because UID and GID should be shown along with service's user config
+ if (types.contains('UID') || types.contains('GID')) {
+ config.StackConfigurations.property_value_attributes.type = 'uid_gid';
+ }
config.StackConfigurations.service_name = 'MISC';
config.category = 'Users and Groups';
},
@@ -210,7 +215,12 @@ App.stackConfigPropertiesMapper = App.QuickDataMapper.create({
* @returns {Boolean}
*/
isMiscService: function(type) {
- return type.length && (type.contains('USER') || type.contains('GROUP') || type.contains('ADDITIONAL_USER_PROPERTY'));
+ return type.length &&
+ (type.contains('USER')
+ || type.contains('GROUP')
+ || type.contains('ADDITIONAL_USER_PROPERTY')
+ || type.contains('UID')
+ || type.contains('GID'));
},
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 29788bc..a32275f 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1147,6 +1147,21 @@ a:focus {
}
}
+.serviceConfigUGIDLbl {
+ display: inline-block;
+ text-align: left;
+ margin-left: 92px;
+ width: 100px;
+}
+
+.serviceConfigUGID {
+ width: 150px !important;
+}
+
+.serviceConfigNoUGID {
+ width: 500px !important;
+}
+
.chart-container {
cursor: pointer;
cursor: -moz-zoom-in;
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
new file mode 100644
index 0000000..24c785c
--- /dev/null
+++ b/ambari-web/app/templates/wizard/controls_service_config_usergroup_with_id.hbs
@@ -0,0 +1,27 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#if view.isUIDGIDVisible}}
+ {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigUGID"}}
+ <label class="serviceConfigUGIDLbl control-label" {{bindAttr for="view.serviceConfig.ugid.name"}}>
+ {{view.serviceConfig.ugid.displayName}}
+ </label>
+ {{view Ember.TextField valueBinding="view.serviceConfig.ugid.value" class="serviceConfigUGID"}}
+{{else}}
+ {{view App.ServiceConfigTextField serviceConfigBinding="view.serviceConfig" class="serviceConfigNoUGID"}}
+{{/if}}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/utils/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index 00cc2a3..7cfcb13 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -545,6 +545,9 @@ App.config = Em.Object.create({
*/
getViewClass: function (displayType, dependentConfigPattern, unit) {
switch (displayType) {
+ case 'user':
+ case 'group':
+ return App.ServiceConfigTextFieldUserGroupWithID;
case 'checkbox':
case 'boolean':
return dependentConfigPattern ? App.ServiceConfigCheckboxWithDependencies : App.ServiceConfigCheckbox;
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/views/common/configs/service_configs_by_category_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_configs_by_category_view.js b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
index 4058020..6cf9b99 100644
--- a/ambari-web/app/views/common/configs/service_configs_by_category_view.js
+++ b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
@@ -50,6 +50,7 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.Persist, App.ConfigOverrid
* @type {App.ServiceConfigProperty[]}
*/
serviceConfigs: null,
+ isUIDGIDVisible: true,
/**
* This is array of all the properties which apply
@@ -744,6 +745,11 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.Persist, App.ConfigOverrid
setRecommendedValue: function (event) {
var serviceConfigProperty = event.contexts[0];
serviceConfigProperty.set('value', serviceConfigProperty.get('recommendedValue'));
+
+ //in case of USER/GROUP fields, if they have uid/gid set, then these need to be reset to the recommended value as well
+ if (serviceConfigProperty.get('ugid')) {
+ serviceConfigProperty.set('ugid.value', serviceConfigProperty.get('ugid.recommendedValue'));
+ }
serviceConfigProperty = null;
},
http://git-wip-us.apache.org/repos/asf/ambari/blob/f92d1219/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index edeaf0a..4e926ba 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -247,6 +247,40 @@ App.ServiceConfigTextField = Ember.TextField.extend(App.ServiceConfigPopoverSupp
});
/**
+ * Customized input control for user/group configs with corresponding uid/gid specified
+ * @type {Em.View}
+ */
+App.ServiceConfigTextFieldUserGroupWithID = Ember.View.extend(App.ServiceConfigPopoverSupport, {
+ valueBinding: 'serviceConfig.value',
+ placeholderBinding: 'serviceConfig.savedValue',
+ classNames: 'display-inline-block',
+
+ templateName: require('templates/wizard/controls_service_config_usergroup_with_id'),
+
+ isUIDGIDVisible: function () {
+ var overrideUidDisabled = this.get('parentView').serviceConfigs.findProperty('name', 'override_uid').value === 'false';
+ //don't display the ugid field if there is no uid/gid for this property or override_uid is unchecked
+ if (Em.isNone(this.get('serviceConfig.ugid')) || overrideUidDisabled) {
+ return false;
+ }
+
+ var serviceName = this.get('serviceConfig').name.substr(0, this.get('serviceConfig').name.indexOf('_')).toUpperCase();
+ if (serviceName === 'ZK') {
+ serviceName = 'ZOOKEEPER';
+ }
+ if (serviceName === 'MAPRED') {
+ serviceName = 'YARN';
+ }
+ //addServiceController and service already installed or Hadoop user group
+ if (App.Service.find(serviceName).get('isLoaded') || serviceName === 'USER') {
+ return false;
+ }
+
+ return this.get('parentView.isUIDGIDVisible');
+ }.property('parentView.isUIDGIDVisible')
+});
+
+/**
* Customized input control with Units type specified
* @type {Em.View}
*/
@@ -415,6 +449,11 @@ var checkboxConfigView = Ember.Checkbox.extend(App.ServiceConfigPopoverSupport,
this.set('serviceConfig.value', this.get(this.get('checked') + 'Value'));
this.get('serviceConfig').set("editDone", true);
this.sendRequestRorDependentConfigs(this.get('serviceConfig'));
+
+ //if the checkbox being toggled is the 'Have Ambari manage UIDs' in Misc Tab, show/hide uid/gid column accordingly
+ if (this.get('serviceConfig.name') === 'override_uid') {
+ this.set('parentView.isUIDGIDVisible', this.get('checked'));
+ }
}
}.observes('checked'),
[15/21] ambari git commit: AMBARI-21386. After install packages,
upgrade button does not work (alexantonenko)
Posted by rl...@apache.org.
AMBARI-21386. After install packages, upgrade button does not work (alexantonenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/01d60f4f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/01d60f4f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/01d60f4f
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 01d60f4f8b8c32238ba840e109e7b7e8b9de1774
Parents: ba2a29f
Author: Alex Antonenko <hi...@gmail.com>
Authored: Mon Jul 10 15:21:14 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Mon Jul 17 18:24:07 2017 +0300
----------------------------------------------------------------------
ambari-web/app/utils/ajax/ajax.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/01d60f4f/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index d6e6dfa..f77b386 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -2287,7 +2287,7 @@ var urls = {
mock: '/data/users/privileges_{userName}.json'
},
'router.login.clusters': {
- 'real': '/clusters?fields=Clusters/provisioning_state,Clusters/security_type',
+ 'real': '/clusters?fields=Clusters/provisioning_state,Clusters/security_type,Clusters/version',
'mock': '/data/clusters/info.json'
},
'router.login.message': {
[11/21] ambari git commit: AMBARI-21442. Ambari updates memory
settings in blueprint incorrectly (amagyar)
Posted by rl...@apache.org.
AMBARI-21442. Ambari updates memory settings in blueprint incorrectly (amagyar)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/93fe8487
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/93fe8487
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/93fe8487
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 93fe8487a16fd2fe4f0c6a19bdc5d8a7c7b304a7
Parents: f92d121
Author: Attila Magyar <am...@hortonworks.com>
Authored: Mon Jul 17 10:19:38 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Mon Jul 17 10:19:38 2017 +0200
----------------------------------------------------------------------
.../BlueprintConfigurationProcessor.java | 64 +++-----
.../server/controller/internal/Stack.java | 2 +-
.../server/controller/internal/UnitUpdater.java | 150 +++++++++++++++++++
.../validators/TopologyValidatorFactory.java | 2 +-
.../validators/UnitValidatedProperty.java | 95 ++++++++++++
.../topology/validators/UnitValidator.java | 79 ++++++++++
.../controller/internal/UnitUpdaterTest.java | 114 ++++++++++++++
.../topology/validators/UnitValidatorTest.java | 114 ++++++++++++++
8 files changed, 571 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 37284be..1daf76f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -19,6 +19,8 @@
package org.apache.ambari.server.controller.internal;
+import static java.util.stream.Collectors.groupingBy;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -48,6 +50,7 @@ import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
import org.apache.ambari.server.topology.Configuration;
import org.apache.ambari.server.topology.HostGroup;
import org.apache.ambari.server.topology.HostGroupInfo;
+import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -2041,39 +2044,6 @@ public class BlueprintConfigurationProcessor {
}
/**
- * Updater which appends "m" to the original property value.
- * For example, "1024" would be updated to "1024m".
- */
- private static class MPropertyUpdater implements PropertyUpdater {
- /**
- * Append 'm' to the original property value if it doesn't already exist.
- *
- * @param propertyName property name
- * @param origValue original value of property
- * @param properties all properties
- * @param topology cluster topology
- *
- * @return property with 'm' appended
- */
- @Override
- public String updateForClusterCreate(String propertyName,
- String origValue,
- Map<String, Map<String, String>> properties,
- ClusterTopology topology) {
-
- return origValue.endsWith("m") ? origValue : origValue + 'm';
- }
-
- @Override
- public Collection<String> getRequiredHostGroups(String propertyName,
- String origValue,
- Map<String, Map<String, String>> properties,
- ClusterTopology topology) {
- return Collections.emptySet();
- }
- }
-
- /**
* Class to facilitate special formatting needs of property values.
*/
private abstract static class AbstractPropertyValueDecorator implements PropertyUpdater {
@@ -2784,20 +2754,7 @@ public class BlueprintConfigurationProcessor {
new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));
// Required due to AMBARI-4933. These no longer seem to be required as the default values in the stack
// are now correct but are left here in case an existing blueprint still contains an old value.
- mHadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
- mHadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
- mHadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
- mHadoopEnvMap.put("namenode_opt_permsize", new MPropertyUpdater());
- mHadoopEnvMap.put("namenode_opt_maxpermsize", new MPropertyUpdater());
- mHadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
- mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater());
- mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
- mapredEnvMap.put("jtnode_heapsize", new MPropertyUpdater());
- hbaseEnvMap.put("hbase_master_heapsize", new MPropertyUpdater());
- hbaseEnvMap.put("hbase_regionserver_heapsize", new MPropertyUpdater());
- oozieEnvHeapSizeMap.put("oozie_heapsize", new MPropertyUpdater());
- oozieEnvHeapSizeMap.put("oozie_permsize", new MPropertyUpdater());
- zookeeperEnvMap.put("zk_server_heapsize", new MPropertyUpdater());
+ addUnitPropertyUpdaters();
hawqSiteMap.put("hawq_master_address_host", new SingleHostTopologyUpdater("HAWQMASTER"));
hawqSiteMap.put("hawq_standby_address_host", new SingleHostTopologyUpdater("HAWQSTANDBY"));
@@ -2816,6 +2773,19 @@ public class BlueprintConfigurationProcessor {
});
}
+ private static void addUnitPropertyUpdaters() {
+ Map<String, List<UnitValidatedProperty>> propsPerConfigType = UnitValidatedProperty.ALL
+ .stream()
+ .collect(groupingBy(UnitValidatedProperty::getConfigType));
+ for (String configType : propsPerConfigType.keySet()) {
+ Map<String, PropertyUpdater> unitUpdaters = new HashMap<>();
+ for (UnitValidatedProperty each : propsPerConfigType.get(configType)) {
+ unitUpdaters.put(each.getPropertyName(), new UnitUpdater(each.getServiceName(), each.getConfigType()));
+ }
+ mPropertyUpdaters.put(configType, unitUpdaters);
+ }
+ }
+
private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<String> configTypesUpdated) {
// AMBARI-5206
final Map<String , String> userProps = new HashMap<>();
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
index e1ea1cd..a28a3b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java
@@ -757,7 +757,7 @@ public class Stack {
private Set<PropertyDependencyInfo> dependsOnProperties =
Collections.emptySet();
- ConfigProperty(StackConfigurationResponse config) {
+ public ConfigProperty(StackConfigurationResponse config) {
this.name = config.getPropertyName();
this.value = config.getPropertyValue();
this.attributes = config.getPropertyAttributes();
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
new file mode 100644
index 0000000..8b7cb67
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static org.apache.commons.lang.StringUtils.isBlank;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
+
+/**
+ * I append the stack defined unit to the original property value.
+ * For example, "1024" would be updated to "1024m" if the stack unit is MB
+ * Properties with any other unit than the stack defined unit are rejected.
+ */
+public class UnitUpdater implements BlueprintConfigurationProcessor.PropertyUpdater {
+ private final String serviceName;
+ private final String configType;
+
+ public UnitUpdater(String serviceName, String configType) {
+ this.serviceName = serviceName;
+ this.configType = configType;
+ }
+
+ /**
+ * @return property value with updated unit
+ */
+ @Override
+ public String updateForClusterCreate(String propertyName,
+ String origValue,
+ Map<String, Map<String, String>> properties,
+ ClusterTopology topology) {
+ PropertyUnit stackUnit = PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, propertyName);
+ PropertyValue value = PropertyValue.of(propertyName, origValue);
+ if (value.hasUnit(stackUnit)) {
+ return value.toString();
+ } else if (!value.hasAnyUnit()) {
+ return value.withUnit(stackUnit);
+ } else { // should not happen because of prevalidation in UnitValidator
+ throw new IllegalArgumentException("Property " + propertyName + "=" + origValue + " has an unsupported unit. Stack supported unit is: " + stackUnit + " or no unit");
+ }
+ }
+
+ @Override
+ public Collection<String> getRequiredHostGroups(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
+ return Collections.emptySet();
+ }
+
+ public static class PropertyUnit {
+ private static final String DEFAULT_UNIT = "m";
+ private final String unit;
+
+ public static PropertyUnit of(Stack stack, UnitValidatedProperty property) {
+ return PropertyUnit.of(stack, property.getServiceName(), property.getConfigType(), property.getPropertyName());
+ }
+
+ public static PropertyUnit of(Stack stack, String serviceName, String configType, String propertyName) {
+ return new PropertyUnit(
+ stackUnit(stack, serviceName, configType, propertyName)
+ .map(PropertyUnit::toJvmUnit)
+ .orElse(DEFAULT_UNIT));
+ }
+
+ private static Optional<String> stackUnit(Stack stack, String serviceName, String configType, String propertyName) {
+ try {
+ return Optional.ofNullable(
+ stack.getConfigurationPropertiesWithMetadata(serviceName, configType)
+ .get(propertyName)
+ .getPropertyValueAttributes()
+ .getUnit());
+ } catch (NullPointerException e) {
+ return Optional.empty();
+ }
+ }
+
+ private static String toJvmUnit(String stackUnit) {
+ switch (stackUnit.toLowerCase()) {
+ case "mb" : return "m";
+ case "gb" : return "g";
+ case "b" :
+ case "bytes" : return "";
+ default: throw new IllegalArgumentException("Unsupported stack unit: " + stackUnit);
+ }
+ }
+
+ private PropertyUnit(String unit) {
+ this.unit = unit;
+ }
+
+ @Override
+ public String toString() {
+ return unit;
+ }
+ }
+
+ public static class PropertyValue {
+ private final String value;
+
+ public static PropertyValue of(String name, String value) {
+ return new PropertyValue(normalized(name, value));
+ }
+
+ private static String normalized(String name, String value) {
+ if (isBlank(value)) {
+ throw new IllegalArgumentException("Missing property value " + name);
+ }
+ return value.trim().toLowerCase();
+ }
+
+ private PropertyValue(String value) {
+ this.value = value;
+ }
+
+ public boolean hasUnit(PropertyUnit unit) {
+ return value.endsWith(unit.toString());
+ }
+
+ public boolean hasAnyUnit() {
+ return !Character.isDigit(value.charAt(value.length() -1));
+ }
+
+ public String withUnit(PropertyUnit unit) {
+ return value + unit;
+ }
+
+ @Override
+ public String toString() {
+ return value;
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
index 5a6f64e..bc76bff 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/TopologyValidatorFactory.java
@@ -25,7 +25,7 @@ public class TopologyValidatorFactory {
public TopologyValidatorFactory() {
validators = ImmutableList.of(new RequiredConfigPropertiesValidator(), new RequiredPasswordValidator(), new HiveServiceValidator(),
- new StackConfigTypeValidator());
+ new StackConfigTypeValidator(), new UnitValidator(UnitValidatedProperty.ALL));
}
public TopologyValidator createConfigurationValidatorChain() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
new file mode 100644
index 0000000..61f01db
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidatedProperty.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import java.util.Set;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Some configuration values need to have "m" appended to them to be valid values.
+ * Required due to AMBARI-4933.
+ */
+public class UnitValidatedProperty {
+ public static final Set<UnitValidatedProperty> ALL = ImmutableSet.<UnitValidatedProperty>builder()
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_heapsize"))
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_newsize"))
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_maxnewsize"))
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_permsize"))
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "namenode_opt_maxpermsize"))
+ .add(new UnitValidatedProperty("HDFS", "hadoop-env", "dtnode_heapsize"))
+ .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_opt_newsize"))
+ .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_opt_maxnewsize"))
+ .add(new UnitValidatedProperty("MAPREDUCE2", "mapred-env","jtnode_heapsize"))
+ .add(new UnitValidatedProperty("HBASE", "hbase-env", "hbase_master_heapsize"))
+ .add(new UnitValidatedProperty("HBASE", "hbase-env","hbase_regionserver_heapsize"))
+ .add(new UnitValidatedProperty("OOZIE", "oozie-env","oozie_heapsize"))
+ .add(new UnitValidatedProperty("OOZIE", "oozie-env", "oozie_permsize"))
+ .add(new UnitValidatedProperty("ZOOKEEPER", "zookeeper-env", "zk_server_heapsize"))
+ .build();
+
+ private final String configType;
+ private final String serviceName;
+ private final String propertyName;
+
+ public UnitValidatedProperty(String serviceName, String configType, String propertyName) {
+ this.configType = configType;
+ this.serviceName = serviceName;
+ this.propertyName = propertyName;
+ }
+
+ public boolean hasTypeAndName(String configType, String propertyName) {
+ return configType.equals(this.getConfigType()) && propertyName.equals(this.getPropertyName());
+ }
+
+ public String getConfigType() {
+ return configType;
+ }
+
+ public String getServiceName() {
+ return serviceName;
+ }
+
+ public String getPropertyName() {
+ return propertyName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UnitValidatedProperty that = (UnitValidatedProperty) o;
+ return new EqualsBuilder()
+ .append(configType, that.configType)
+ .append(serviceName, that.serviceName)
+ .append(propertyName, that.propertyName)
+ .isEquals();
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(17, 37)
+ .append(configType)
+ .append(serviceName)
+ .append(propertyName)
+ .toHashCode();
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
new file mode 100644
index 0000000..e75ffa4
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import static org.apache.ambari.server.controller.internal.UnitUpdater.PropertyValue;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.controller.internal.UnitUpdater.PropertyUnit;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroupInfo;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.apache.ambari.server.topology.TopologyValidator;
+
+/**
+ * I validate the unit of properties by checking if it matches to the stack defined unit.
+ * Properties with different unit than the stack defined unit are rejected.
+ */
+public class UnitValidator implements TopologyValidator {
+ private final Set<UnitValidatedProperty> relevantProps;
+
+ public UnitValidator(Set<UnitValidatedProperty> propertiesToBeValidated) {
+ this.relevantProps = propertiesToBeValidated;
+ }
+
+ @Override
+ public void validate(ClusterTopology topology) throws InvalidTopologyException {
+ Stack stack = topology.getBlueprint().getStack();
+ validateConfig(topology.getConfiguration().getFullProperties(), stack);
+ for (HostGroupInfo hostGroup : topology.getHostGroupInfo().values()) {
+ validateConfig(hostGroup.getConfiguration().getFullProperties(), stack);
+ }
+ }
+
+ private void validateConfig(Map<String, Map<String, String>> configuration, Stack stack) {
+ for (Map.Entry<String, Map<String, String>> each : configuration.entrySet()) {
+ validateConfigType(each.getKey(), each.getValue(), stack);
+ }
+ }
+
+ private void validateConfigType(String configType, Map<String, String> config, Stack stack) {
+ for (String propertyName : config.keySet()) {
+ validateProperty(configType, config, propertyName, stack);
+ }
+ }
+
+ private void validateProperty(String configType, Map<String, String> config, String propertyName, Stack stack) {
+ relevantProps.stream()
+ .filter(each -> each.hasTypeAndName(configType, propertyName))
+ .findFirst()
+ .ifPresent(relevantProperty -> checkUnit(config, stack, relevantProperty));
+ }
+
+ private void checkUnit(Map<String, String> configToBeValidated, Stack stack, UnitValidatedProperty prop) {
+ PropertyUnit stackUnit = PropertyUnit.of(stack, prop);
+ PropertyValue value = PropertyValue.of(prop.getPropertyName(), configToBeValidated.get(prop.getPropertyName()));
+ if (value.hasAnyUnit() && !value.hasUnit(stackUnit)) {
+ throw new IllegalArgumentException("Property " + prop.getPropertyName() + "=" + value + " has an unsupported unit. Stack supported unit is: " + stackUnit + " or no unit");
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
new file mode 100644
index 0000000..6de6cd1
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import static org.easymock.EasyMock.expect;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class UnitUpdaterTest extends EasyMockSupport {
+ public static final String HEAPSIZE = "oozie_heapsize";
+ @Rule public EasyMockRule mocks = new EasyMockRule(this);
+ public static final String OOZIE = "OOZIE";
+ public static final String OOZIE_ENV = "oozie-env";
+ private Map<String, Stack.ConfigProperty> stackConfigWithMetadata = new HashMap<>();
+ private UnitUpdater unitUpdater;
+ private @Mock ClusterTopology clusterTopology;
+ private @Mock Blueprint blueprint;
+ private @Mock Stack stack;
+
+ @Test
+ public void testStackUnitIsAppendedWhereUnitIsNotDefined() throws Exception {
+ stackUnitIs(HEAPSIZE, "GB");
+ assertEquals("1g", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "1"));
+ }
+
+ @Test
+ public void testDefaultMbStackUnitIsAppendedWhereUnitIsNotDefined() throws Exception {
+ assertEquals("4096m", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "4096"));
+ }
+
+ @Test
+ public void testNoUnitIsAppendedWhenPropertyAlreadyHasTheStackUnit() throws Exception {
+ stackUnitIs(HEAPSIZE, "MB");
+ assertEquals("128m", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "128m"));
+ }
+
+ @Test
+ public void testNoUnitIsAppendedIfStackUnitIsInBytes() throws Exception {
+ stackUnitIs(HEAPSIZE, "Bytes");
+ assertEquals("128", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "128"));
+ }
+
+ @Test
+ public void testUnitSuffixIsCaseInsenitiveAndWhiteSpaceTolerant() throws Exception {
+ stackUnitIs(HEAPSIZE, "GB");
+ assertEquals("1g", updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, " 1G "));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testRejectValuesWhereStackUnitDoesNotMatchToGiveUnit() throws Exception {
+ stackUnitIs(HEAPSIZE, "MB");
+ updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "2g");
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testRejectEmptyPropertyValue() throws Exception {
+ updateUnit(OOZIE, OOZIE_ENV, HEAPSIZE, "");
+ }
+
+ private void stackUnitIs(String name, String unit) {
+ ValueAttributesInfo propertyValueAttributes = new ValueAttributesInfo();
+ propertyValueAttributes.setUnit(unit);
+ stackConfigWithMetadata.put(name, new Stack.ConfigProperty(new StackConfigurationResponse(
+ name,
+ "any",
+ "any",
+ "any",
+ "any",
+ true,
+ Collections.emptySet(),
+ Collections.emptyMap(),
+ propertyValueAttributes,
+ Collections.emptySet()
+ )));
+ }
+
+ private String updateUnit(String serviceName, String configType, String propName, String propValue) throws InvalidTopologyException, ConfigurationTopologyException {
+ UnitUpdater updater = new UnitUpdater(serviceName, configType);
+ expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+ expect(blueprint.getStack()).andReturn(stack).anyTimes();
+ expect(stack.getConfigurationPropertiesWithMetadata(serviceName, configType)).andReturn(stackConfigWithMetadata).anyTimes();
+ replayAll();
+ return updater.updateForClusterCreate(propName, propValue, Collections.emptyMap(), clusterTopology);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/93fe8487/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
new file mode 100644
index 0000000..334ee4b
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/UnitValidatorTest.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.topology.validators;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static java.util.Collections.emptyMap;
+import static org.easymock.EasyMock.expect;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.state.ValueAttributesInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+
+public class UnitValidatorTest extends EasyMockSupport {
+ private static final String CONFIG_TYPE = "config-type";
+ private static final String SERVICE = "service";
+ @Rule public EasyMockRule mocks = new EasyMockRule(this);
+ private Map<String, Stack.ConfigProperty> stackConfigWithMetadata = new HashMap<>();
+ private UnitValidator validator;
+ private @Mock ClusterTopology clusterTopology;
+ private @Mock Blueprint blueprint;
+ private @Mock Stack stack;
+
+ @Test(expected = IllegalArgumentException.class)
+ public void rejectsPropertyWithDifferentUnitThanStackUnit() throws Exception {
+ stackUnitIs("property1", "MB");
+ propertyToBeValidatedIs("property1", "12G");
+ validate("property1");
+ }
+
+ @Test
+ public void acceptsPropertyWithSameUnitThanStackUnit() throws Exception {
+ stackUnitIs("property1", "MB");
+ propertyToBeValidatedIs("property1", "12m");
+ validate("property1");
+ }
+
+ @Test
+ public void skipsValidatingIrrelevantProperty() throws Exception {
+ stackUnitIs("property1", "MB");
+ propertyToBeValidatedIs("property1", "12g");
+ validate("property2");
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
+ expect(clusterTopology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes();
+ expect(blueprint.getStack()).andReturn(stack).anyTimes();
+ expect(stack.getConfigurationPropertiesWithMetadata(SERVICE, CONFIG_TYPE)).andReturn(stackConfigWithMetadata).anyTimes();
+ }
+
+ private void propertyToBeValidatedIs(String propertyName, String propertyValue) throws InvalidTopologyException, ConfigurationTopologyException {
+ Map<String, Map<String, String>> propertiesToBeValidated = new HashMap<String, Map<String, String>>() {{
+ put(CONFIG_TYPE, new HashMap<String, String>(){{
+ put(propertyName, propertyValue);
+ }});
+ }};
+ expect(clusterTopology.getConfiguration()).andReturn(new Configuration(propertiesToBeValidated, emptyMap())).anyTimes();
+ replayAll();
+ }
+
+ private void validate(String propertyName) throws InvalidTopologyException {
+ validator = new UnitValidator(newHashSet(new UnitValidatedProperty(SERVICE, CONFIG_TYPE, propertyName)));
+ validator.validate(clusterTopology);
+ }
+
+ private void stackUnitIs(String name, String unit) {
+ ValueAttributesInfo propertyValueAttributes = new ValueAttributesInfo();
+ propertyValueAttributes.setUnit(unit);
+ stackConfigWithMetadata.put(name, new Stack.ConfigProperty(new StackConfigurationResponse(
+ name,
+ "any",
+ "any",
+ "any",
+ "any",
+ true,
+ Collections.emptySet(),
+ Collections.emptyMap(),
+ propertyValueAttributes,
+ Collections.emptySet()
+ )));
+ }
+}
\ No newline at end of file
[06/21] ambari git commit: AMBARI-21460. Add new kafka client
properties to the ambari managed atlas config (smohanty)
Posted by rl...@apache.org.
AMBARI-21460. Add new kafka client properties to the ambari managed atlas config (smohanty)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c7f42285
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c7f42285
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c7f42285
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: c7f42285a2bd36a215b6c8988cfd7fd025461285
Parents: 0a8c397
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Fri Jul 14 15:42:52 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Fri Jul 14 15:44:25 2017 -0700
----------------------------------------------------------------------
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 4 ++++
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 6 +++++-
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 1 +
.../ATLAS/configuration/application-properties.xml | 17 +++++++++++++++++
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 4 ++++
.../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml | 4 ++++
.../stacks/HDP/2.6/upgrades/upgrade-2.6.xml | 1 +
7 files changed, 36 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 30796cc..db3ef59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -153,6 +153,10 @@
<type>atlas-env</type>
<replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+ <type>application-properties</type>
+ <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 1f37389..cfd429f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -456,7 +456,7 @@
<summary>Updating the Atlas Log4J properties to include parameterizations</summary>
</task>
</execute-stage>
-
+
<execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas TLS Exclude Protocols">
<task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol">
<summary>Updating Atlas TLS Exclude Protocols to exclude TLS v1.2</summary>
@@ -475,6 +475,10 @@
</task>
</execute-stage>
+ <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+ <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+ </execute-stage>
+
<!--KAFKA-->
<execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
<task xsi:type="configure" id="kafka_log4j_parameterize">
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 22c9a8d..840b17d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -1101,6 +1101,7 @@
<task xsi:type="configure" id="hdp_2_5_4_0_atlas_exclude_tls_protocol"/>
<task xsi:type="configure" id="increase_atlas_zookeeper_timeouts"/>
<task xsi:type="configure" id="atlas_env_gc_worker"/>
+ <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
</pre-upgrade>
<pre-downgrade />
<upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
index 91de1b0..c271dc3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/ATLAS/configuration/application-properties.xml
@@ -92,4 +92,21 @@
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>atlas.kafka.session.timeout.ms</name>
+ <value>30000</value>
+ <description>New Kafka consumer API</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>atlas.kafka.enable.auto.commit</name>
+ <value>false</value>
+ <description>New Kafka consumer API</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
+ <name>atlas.kafka.auto.commit.enable</name>
+ <deleted>true</deleted>
+ <on-ambari-upgrade add="false"/>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 6dd2129..c2c1532 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -171,6 +171,10 @@
<type>atlas-env</type>
<replace key="content" find="-Xloggc:$ATLAS_LOG_DIRgc-worker.log" replace-with="-Xloggc:$ATLAS_LOG_DIR/gc-worker.log"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete" summary="Updating Atlas Kafka configurations.">
+ <type>application-properties</type>
+ <transfer operation="delete" delete-key="atlas.kafka.auto.commit.enable"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index e262971..df609cd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -358,6 +358,10 @@
<task xsi:type="configure" id="atlas_env_gc_worker"/>
</execute-stage>
+ <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Updating Atlas Kafka configurations.">
+ <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
+ </execute-stage>
+
<!-- KMS -->
<execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger Kms plugin">
<task xsi:type="configure" id="hdp_2_6_maint_ranger_kms_plugin_cluster_name"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/c7f42285/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index 6b01ce9..b376fa7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -1034,6 +1034,7 @@
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_6_maint_ranger_atlas_plugin_cluster_name"/>
<task xsi:type="configure" id="atlas_env_gc_worker"/>
+ <task xsi:type="configure" id="hdp_2_6_atlas_kafka_auto_commit_enable_property_delete"/>
</pre-upgrade>
<pre-downgrade/> <!-- no-op to prevent config changes on downgrade -->
<upgrade>
[08/21] ambari git commit: AMBARI-21459. Add lucene index migration
script to infra solr client package (oleewere)
Posted by rl...@apache.org.
AMBARI-21459. Add lucene index migration script to infra solr client package (oleewere)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f072dd21
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f072dd21
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f072dd21
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f072dd2156e83d8a487ce0c3229c1ae22788c6be
Parents: e799f52
Author: oleewere <ol...@gmail.com>
Authored: Wed Jul 12 21:04:54 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Sat Jul 15 19:04:53 2017 +0200
----------------------------------------------------------------------
ambari-infra/ambari-infra-solr-client/build.xml | 1 +
ambari-infra/ambari-infra-solr-client/pom.xml | 10 ++
.../src/main/resources/solrIndexHelper.sh | 156 +++++++++++++++++++
3 files changed, 167 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/build.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/build.xml b/ambari-infra/ambari-infra-solr-client/build.xml
index a54e336..9b8b6cc 100644
--- a/ambari-infra/ambari-infra-solr-client/build.xml
+++ b/ambari-infra/ambari-infra-solr-client/build.xml
@@ -35,6 +35,7 @@
</copy>
<copy todir="target/package" includeEmptyDirs="no">
<fileset file="src/main/resources/solrCloudCli.sh"/>
+ <fileset file="src/main/resources/solrIndexHelper.sh"/>
</copy>
<copy todir="target/package" includeEmptyDirs="no">
<fileset file="src/main/resources/log4j.properties"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/pom.xml b/ambari-infra/ambari-infra-solr-client/pom.xml
index d103003..3818aba 100644
--- a/ambari-infra/ambari-infra-solr-client/pom.xml
+++ b/ambari-infra/ambari-infra-solr-client/pom.xml
@@ -36,6 +36,16 @@
<version>${solr.version}</version>
</dependency>
<dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-core</artifactId>
+ <version>${solr.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.lucene</groupId>
+ <artifactId>lucene-backward-codecs</artifactId>
+ <version>${solr.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>3.4.9</version>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f072dd21/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
new file mode 100755
index 0000000..12e6a77
--- /dev/null
+++ b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+: ${JAVA_HOME:?"Please set the JAVA_HOME for lucene index migration!"}
+
+function print_help() {
+ cat << EOF
+
+ Usage: solrIndexHelper.sh [<command>] [<arguments with flags>]
+
+ commands:
+ upgrade-index Check and upgrade solr index data in core directories.
+ run-check-index-tool call 'java -cp ... org.apache.lucene.index.CheckIndex' directly
+ run-upgrade-index-tool call 'java -cp ... org.apache.lucene.index.IndexUpgrader' directly
+ help print usage
+
+
+ upgrade-index command arguments:
+ -d, --index-data-dir <DIRECTORY> Location of the solr cores (e.g.: /opt/ambari_infra_solr/data)
+ -c, --core-filters <FILTER1,FILTER2> Comma separated name filters of core directories (default: hadoop_logs,audit_logs,history)
+ -f, --force Force to start index upgrade, even if the version is at least 6.
+
+EOF
+}
+
+function upgrade_core() {
+ local INDEX_DIR=${1:?"usage: <index_base_dir> e.g.: /opt/ambari_infra_solr/data"}
+ local FORCE_UPDATE=${2:?"usage <force_update_flag> e.g.: true"}
+ local SOLR_CORE_FILTERS=${3:?"usage: <comma separated core filters> e.g.: hadoop_logs,audit_logs,history"}
+
+ SOLR_CORE_FILTER_ARR=$(echo $SOLR_CORE_FILTERS | sed "s/,/ /g")
+
+ for coll in $SOLR_CORE_FILTER_ARR; do
+ if [[ "$1" == *"$coll"* ]]; then
+ echo "Core '$1' dir name contains $coll (core filter)'";
+ version=$(PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex -fast $1|grep " version="|sed -e 's/.*=//g'|head -1)
+ if [ -z $version ] ; then
+ echo "Core '$1' - Empty index?"
+ return
+ fi
+ majorVersion=$(echo $version|cut -c 1)
+ if [ $majorVersion -ge 6 ] && [ $FORCE_UPDATE == "false" ] ; then
+ echo "Core '$1' - Already on version $version, not upgrading. Use -f or --force option to run upgrade anyway."
+ else
+ echo "Core '$1' - Index version is $version, upgrading ..."
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader -delete-prior-commits $1
+ echo "Upgrading core '$1' has finished"
+ fi
+ fi
+ done
+}
+
+function upgrade_index() {
+ while [[ $# -gt 0 ]]
+ do
+ key="$1"
+ case $key in
+ -c|--core-filters)
+ local SOLR_CORE_FILTERS="$2"
+ shift 2
+ ;;
+ -f|--force)
+ local FORCE_UPDATE="true"
+ shift
+ ;;
+ -d|--index-data-dir)
+ local INDEX_DIR="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown option: $1"
+ exit 1
+ ;;
+ esac
+ done
+ if [[ -z "$INDEX_DIR" ]] ; then
+ echo "Index data directory option is required (-d or --index-data-dir). Exiting..."
+ exit 1
+ fi
+
+ if [[ -z "$SOLR_CORE_FILTERS" ]] ; then
+ SOLR_CORE_FILTERS="hadoop_logs,audit_logs,history"
+ fi
+
+ if [[ -z "$FORCE_UPDATE" ]] ; then
+ FORCE_UPDATE="false"
+ else
+ echo "NOTE: Forcing index upgrade is set."
+ fi
+
+ CORES=$(for replica_dir in `find $INDEX_DIR -name data`; do dirname $replica_dir; done);
+ if [[ -z "$CORES" ]] ; then
+ echo "No indices found on path $INDEX_DIR"
+ else
+ for c in $CORES ; do
+ if find $c/data -maxdepth 1 -type d -name 'index*' 1> /dev/null 2>&1; then
+ name=$(echo $c | sed -e 's/.*\///g')
+ abspath=$(cd "$(dirname "$c")"; pwd)/$(basename "$c")
+ find $c/data -maxdepth 1 -type d -name 'index*' | while read indexDir; do
+ echo "Checking core $name - $abspath"
+ upgrade_core "$indexDir" "$FORCE_UPDATE" "$SOLR_CORE_FILTERS"
+ done
+ else
+ echo "No index folder found for $name"
+ fi
+ done
+ echo "DONE"
+ fi
+}
+
+function upgrade_index_tool() {
+ # see: https://cwiki.apache.org/confluence/display/solr/IndexUpgrader+Tool
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.IndexUpgrader ${@}
+}
+
+function check_index_tool() {
+ PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$sdir/libs/lucene-core-6.6.0.jar:$sdir/libs/lucene-backward-codecs-6.6.0.jar" org.apache.lucene.index.CheckIndex ${@}
+}
+
+function main() {
+ command="$1"
+ case $command in
+ "upgrade-index")
+ upgrade_index "${@:2}"
+ ;;
+ "run-check-index-tool")
+ check_index_tool "${@:2}"
+ ;;
+ "run-upgrade-index-tool")
+ upgrade_index_tool "${@:2}"
+ ;;
+ "help")
+ print_help
+ ;;
+ *)
+ echo "Available commands: (upgrade-index | run-check-index-tool | run-upgrade-index-tool | help)"
+ ;;
+ esac
+}
+
+main ${1+"$@"}
[02/21] ambari git commit: AMBARI-19038. Support migration of LDAP
users & groups to PAM (rlevas)
Posted by rl...@apache.org.
AMBARI-19038. Support migration of LDAP users & groups to PAM (rlevas)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7fac037
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7fac037
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7fac037
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f7fac03778fda337bc96ed49ed7507e1af118b7d
Parents: f22256e
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jul 14 10:47:17 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jul 14 10:47:17 2017 -0400
----------------------------------------------------------------------
.../controllers/groups/GroupsEditCtrl.js | 3 +
ambari-server/pom.xml | 2 +-
ambari-server/sbin/ambari-server | 6 +-
.../LdapToPamMigrationHelper.java | 73 ++++++++++++
.../server/security/authorization/Users.java | 4 +
ambari-server/src/main/python/ambari-server.py | 14 ++-
.../main/python/ambari_server/setupActions.py | 1 +
.../main/python/ambari_server/setupSecurity.py | 119 ++++++++++++++++---
8 files changed, 198 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
index 21d0fd6..a63ebe2 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
@@ -68,6 +68,7 @@ angular.module('ambariAdminConsole')
function loadMembers(){
$scope.group.getMembers().then(function(members) {
+ $scope.group.groupTypeName = $t(GroupConstants.TYPES[$scope.group.group_type].LABEL_KEY);
$scope.groupMembers = members;
$scope.group.editingUsers = angular.copy($scope.groupMembers);
});
@@ -81,6 +82,8 @@ angular.module('ambariAdminConsole')
loadMembers();
});
+ $scope.group.getGroupType();
+
$scope.deleteGroup = function(group) {
ConfirmationModal.show(
$t('common.delete', {
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 878665e..70907da 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1684,7 +1684,7 @@
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
- <version>4.1.0</version>
+ <version>4.3.0</version>
</dependency>
</dependencies>
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/sbin/ambari-server
----------------------------------------------------------------------
diff --git a/ambari-server/sbin/ambari-server b/ambari-server/sbin/ambari-server
index 24ec43a..1c6c612 100755
--- a/ambari-server/sbin/ambari-server
+++ b/ambari-server/sbin/ambari-server
@@ -137,6 +137,10 @@ case "${1:-}" in
echo -e "Setting up PAM properties..."
$PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
;;
+ migrate-ldap-pam)
+ echo -e "Migrating LDAP to PAM"
+ $PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
+ ;;
setup-ldap)
echo -e "Setting up LDAP properties..."
$PYTHON "$AMBARI_PYTHON_EXECUTABLE" $@
@@ -203,7 +207,7 @@ case "${1:-}" in
;;
*)
echo "Usage: $AMBARI_EXECUTABLE
- {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos} [options]
+ {start|stop|reset|restart|upgrade|status|upgradestack|setup|setup-jce|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|check-database|enable-stack|setup-sso|db-purge-history|install-mpack|uninstall-mpack|upgrade-mpack|setup-kerberos|setup-pam|migrate-ldap-pam} [options]
Use $AMBARI_PYTHON_EXECUTABLE <action> --help to get details on options available.
Or, simply invoke ambari-server.py --help to print the options."
exit 1
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
new file mode 100644
index 0000000..8a3a012
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authentication/LdapToPamMigrationHelper.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.security.authentication;
+
+import java.sql.SQLException;
+
+import org.apache.ambari.server.audit.AuditLoggerModule;
+import org.apache.ambari.server.controller.ControllerModule;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DbType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
+public class LdapToPamMigrationHelper {
+ private static final Logger LOG = LoggerFactory.getLogger(LdapToPamMigrationHelper.class);
+
+ @Inject
+ private DBAccessor dbAccessor;
+
+ /**
+ * Migrate LDAP user & groups to PAM
+ *
+ * @throws SQLException if an error occurs while executing the needed SQL statements
+ */
+ private void migrateLdapUsersGroups() throws SQLException {
+ if (dbAccessor.getDbType() != DbType.ORACLE) { // Tested MYSQL && POSTGRES
+ dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from (select user_name from users where user_type = 'PAM') as a)");
+ dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from (select group_name from groups where group_type = 'PAM') as a)");
+ } else { // Tested ORACLE
+ dbAccessor.executeQuery("UPDATE users SET user_type='PAM',ldap_user=0 WHERE ldap_user=1 and user_name not in (select user_name from users where user_type = 'PAM')");
+ dbAccessor.executeQuery("UPDATE groups SET group_type='PAM',ldap_group=0 WHERE ldap_group=1 and group_name not in (select group_name from groups where group_type = 'PAM')");
+ }
+ }
+
+ /**
+ * Support changes needed to migrate LDAP users & groups to PAM
+ *
+ * @param args Simple key value json map
+ */
+ public static void main(String[] args) {
+
+ try {
+ Injector injector = Guice.createInjector(new ControllerModule(), new AuditLoggerModule());
+ LdapToPamMigrationHelper migrationHelper = injector.getInstance(LdapToPamMigrationHelper.class);
+
+ migrationHelper.migrateLdapUsersGroups();
+
+ } catch (Throwable t) {
+ LOG.error("Caught exception on migration. Exiting...", t);
+ System.exit(1);
+ }
+
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 9cdde8f..16c6c16 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -162,6 +162,10 @@ public class Users {
if (userEntity != null) {
userEntities.add(userEntity);
}
+ userEntity = userDAO.findUserByNameAndType(userName, UserType.PAM);
+ if (userEntity != null) {
+ userEntities.add(userEntity);
+ }
return (userEntities.isEmpty() || userEntities.size() > 1) ? null : new User(userEntities.get(0));
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 5adcb04..8fcde77 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -52,8 +52,8 @@ from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SY
SETUP_ACTION, SETUP_SECURITY_ACTION,START_ACTION, STATUS_ACTION, STOP_ACTION, RESTART_ACTION, UPGRADE_ACTION, \
SETUP_JCE_ACTION, SET_CURRENT_ACTION, START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, \
SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION, SETUP_SSO_ACTION, \
- DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, KERBEROS_SETUP_ACTION
-from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam
+ DB_PURGE_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, MIGRATE_LDAP_PAM_ACTION, KERBEROS_SETUP_ACTION
+from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam, migrate_ldap_pam
from ambari_server.userInput import get_validated_string_input
from ambari_server.kerberos_setup import setup_kerberos
@@ -540,6 +540,11 @@ def init_ldap_setup_parser_options(parser):
parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_pam_setup_parser_options(parser):
+ parser.add_option('--pam-config-file', default=None, help="Path to the PAM configuration file", dest="pam_config_file")
+ parser.add_option('--pam-auto-create-groups', default=None, help="Automatically create groups for authenticated users [true/false]", dest="pam_auto_create_groups")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def init_set_current_parser_options(parser):
parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
@@ -783,7 +788,8 @@ def create_user_action_map(args, options):
INSTALL_MPACK_ACTION: UserAction(install_mpack, options),
UNINSTALL_MPACK_ACTION: UserAction(uninstall_mpack, options),
UPGRADE_MPACK_ACTION: UserAction(upgrade_mpack, options),
- PAM_SETUP_ACTION: UserAction(setup_pam),
+ PAM_SETUP_ACTION: UserAction(setup_pam, options),
+ MIGRATE_LDAP_PAM_ACTION: UserAction(migrate_ldap_pam, options),
KERBEROS_SETUP_ACTION: UserAction(setup_kerberos, options)
}
return action_map
@@ -814,7 +820,7 @@ def init_action_parser(action, parser):
INSTALL_MPACK_ACTION: init_install_mpack_parser_options,
UNINSTALL_MPACK_ACTION: init_uninstall_mpack_parser_options,
UPGRADE_MPACK_ACTION: init_upgrade_mpack_parser_options,
- PAM_SETUP_ACTION: init_empty_parser_options,
+ PAM_SETUP_ACTION: init_pam_setup_parser_options,
KERBEROS_SETUP_ACTION: init_kerberos_setup_parser_options,
}
parser.add_option("-v", "--verbose",
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari_server/setupActions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
index 707cb84..61d20af 100644
--- a/ambari-server/src/main/python/ambari_server/setupActions.py
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -47,4 +47,5 @@ INSTALL_MPACK_ACTION = "install-mpack"
UNINSTALL_MPACK_ACTION = "uninstall-mpack"
UPGRADE_MPACK_ACTION = "upgrade-mpack"
PAM_SETUP_ACTION = "setup-pam"
+MIGRATE_LDAP_PAM_ACTION = "migrate-ldap-pam"
KERBEROS_SETUP_ACTION = "setup-kerberos"
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7fac037/ambari-server/src/main/python/ambari_server/setupSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupSecurity.py b/ambari-server/src/main/python/ambari_server/setupSecurity.py
index 17d1025..f175d7c 100644
--- a/ambari-server/src/main/python/ambari_server/setupSecurity.py
+++ b/ambari-server/src/main/python/ambari_server/setupSecurity.py
@@ -37,9 +37,9 @@ from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import is_root, set_file_permissions, \
run_os_command, search_file, is_valid_filepath, change_owner, get_ambari_repo_file_full_name, get_file_owner
-from ambari_server.serverConfiguration import configDefaults, \
+from ambari_server.serverConfiguration import configDefaults, parse_properties_file, \
encrypt_password, find_jdk, find_properties_file, get_alias_string, get_ambari_properties, get_conf_dir, \
- get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, write_property, \
+ get_credential_store_location, get_is_persisted, get_is_secure, get_master_key_location, get_db_type, write_property, \
get_original_master_key, get_value_from_properties, get_java_exe_path, is_alias_string, read_ambari_user, \
read_passwd_for_alias, remove_password_file, save_passwd_for_alias, store_password_file, update_properties_2, \
BLIND_PASSWORD, BOOTSTRAP_DIR_PROPERTY, IS_LDAP_CONFIGURED, JDBC_PASSWORD_FILENAME, JDBC_PASSWORD_PROPERTY, \
@@ -54,6 +54,8 @@ from ambari_server.serverUtils import is_server_runing, get_ambari_server_api_ba
from ambari_server.setupActions import SETUP_ACTION, LDAP_SETUP_ACTION
from ambari_server.userInput import get_validated_string_input, get_prompt_default, read_password, get_YN_input, quit_if_has_answer
from ambari_server.serverClassPath import ServerClassPath
+from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, \
+ get_jdbc_driver_path, ensure_jdbc_driver_is_installed, LINUX_DBMS_KEYS_LIST
logger = logging.getLogger(__name__)
@@ -64,6 +66,9 @@ REGEX_TRUE_FALSE = "^(true|false)?$"
REGEX_SKIP_CONVERT = "^(skip|convert)?$"
REGEX_REFERRAL = "^(follow|ignore)?$"
REGEX_ANYTHING = ".*"
+LDAP_TO_PAM_MIGRATION_HELPER_CMD = "{0} -cp {1} " + \
+ "org.apache.ambari.server.security.authentication.LdapToPamMigrationHelper" + \
+ " >> " + configDefaults.SERVER_OUT_FILE + " 2>&1"
CLIENT_SECURITY_KEY = "client.security"
@@ -621,8 +626,12 @@ def setup_ldap(options):
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'pam':
- err = "PAM is configured. Can not setup LDAP."
- raise FatalException(1, err)
+ query = "PAM is currently configured, do you wish to use LDAP instead [y/n] (n)? "
+ if get_YN_input(query, False):
+ pass
+ else:
+ err = "PAM is configured. Can not setup LDAP."
+ raise FatalException(1, err)
isSecure = get_is_secure(properties)
@@ -824,38 +833,112 @@ def ensure_can_start_under_current_user(ambari_user):
return current_user
class PamPropTemplate:
- def __init__(self, properties, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
+ def __init__(self, properties, i_option, i_prop_name, i_prop_val_pattern, i_prompt_regex, i_allow_empty_prompt, i_prop_name_default=None):
self.prop_name = i_prop_name
+ self.option = i_option
self.pam_prop_name = get_value_from_properties(properties, i_prop_name, i_prop_name_default)
self.pam_prop_val_prompt = i_prop_val_pattern.format(get_prompt_default(self.pam_prop_name))
self.prompt_regex = i_prompt_regex
self.allow_empty_prompt = i_allow_empty_prompt
-def setup_pam():
+def init_pam_properties_list_reqd(properties, options):
+ properties = [
+ PamPropTemplate(properties, options.pam_config_file, PAM_CONFIG_FILE, "PAM configuration file* {0}: ", REGEX_ANYTHING, False, "/etc/pam.d/ambari"),
+ PamPropTemplate(properties, options.pam_auto_create_groups, AUTO_GROUP_CREATION, "Do you want to allow automatic group creation* [true/false] {0}: ", REGEX_TRUE_FALSE, False, "false"),
+ ]
+ return properties
+
+def setup_pam(options):
if not is_root():
- err = 'Ambari-server setup-pam should be run with ' \
- 'root-level privileges'
+ err = 'Ambari-server setup-pam should be run with root-level privileges'
raise FatalException(4, err)
properties = get_ambari_properties()
if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") == 'ldap':
- err = "LDAP is configured. Can not setup PAM."
- raise FatalException(1, err)
+ query = "LDAP is currently configured, do you wish to use PAM instead [y/n] (n)? "
+ if get_YN_input(query, False):
+ pass
+ else:
+ err = "LDAP is configured. Can not setup PAM."
+ raise FatalException(1, err)
+
+ pam_property_list_reqd = init_pam_properties_list_reqd(properties, options)
pam_property_value_map = {}
pam_property_value_map[CLIENT_SECURITY_KEY] = 'pam'
- pamConfig = get_validated_string_input("Enter PAM configuration file: ", PAM_CONFIG_FILE, REGEX_ANYTHING,
- "Invalid characters in the input!", False, False)
-
- pam_property_value_map[PAM_CONFIG_FILE] = pamConfig
+ for pam_prop in pam_property_list_reqd:
+ input = get_validated_string_input(pam_prop.pam_prop_val_prompt, pam_prop.pam_prop_name, pam_prop.prompt_regex,
+ "Invalid characters in the input!", False, pam_prop.allow_empty_prompt,
+ answer = pam_prop.option)
+ if input is not None and input != "":
+ pam_property_value_map[pam_prop.prop_name] = input
- if get_YN_input("Do you want to allow automatic group creation [y/n] (y)? ", True):
- pam_property_value_map[AUTO_GROUP_CREATION] = 'true'
- else:
- pam_property_value_map[AUTO_GROUP_CREATION] = 'false'
+ # Verify that the PAM config file exists, else show warning...
+ pam_config_file = pam_property_value_map[PAM_CONFIG_FILE]
+ if not os.path.exists(pam_config_file):
+ print_warning_msg("The PAM configuration file, {0} does not exist. " \
+ "Please create it before restarting Ambari.".format(pam_config_file))
update_properties_2(properties, pam_property_value_map)
print 'Saving...done'
return 0
+
+#
+# Migration of LDAP users & groups to PAM
+#
+def migrate_ldap_pam(args):
+ properties = get_ambari_properties()
+
+ if get_value_from_properties(properties,CLIENT_SECURITY_KEY,"") != 'pam':
+ err = "PAM is not configured. Please configure PAM authentication first."
+ raise FatalException(1, err)
+
+ db_title = get_db_type(properties).title
+ confirm = get_YN_input("Ambari Server configured for %s. Confirm "
+ "you have made a backup of the Ambari Server database [y/n] (y)? " % db_title, True)
+
+ if not confirm:
+ print_error_msg("Database backup is not confirmed")
+ return 1
+
+ jdk_path = get_java_exe_path()
+ if jdk_path is None:
+ print_error_msg("No JDK found, please run the \"setup\" "
+ "command to install a JDK automatically or install any "
+ "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
+ return 1
+
+ # At this point, the args does not have the ambari database information.
+ # Augment the args with the correct ambari database information
+ parse_properties_file(args)
+
+ ensure_jdbc_driver_is_installed(args, properties)
+
+ print 'Migrating LDAP Users & Groups to PAM'
+
+ serverClassPath = ServerClassPath(properties, args)
+ class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell()
+
+ command = LDAP_TO_PAM_MIGRATION_HELPER_CMD.format(jdk_path, class_path)
+
+ ambari_user = read_ambari_user()
+ current_user = ensure_can_start_under_current_user(ambari_user)
+ environ = generate_env(args, ambari_user, current_user)
+
+ (retcode, stdout, stderr) = run_os_command(command, env=environ)
+ print_info_msg("Return code from LDAP to PAM migration command, retcode = " + str(retcode))
+ if stdout:
+ print "Console output from LDAP to PAM migration command:"
+ print stdout
+ print
+ if stderr:
+ print "Error output from LDAP to PAM migration command:"
+ print stderr
+ print
+ if retcode > 0:
+ print_error_msg("Error executing LDAP to PAM migration, please check the server logs.")
+ else:
+ print_info_msg('LDAP to PAM migration completed')
+ return retcode
[20/21] ambari git commit: AMBARI-21473. Zeppelin does not start and
returns: params.zookeeper_znode_parent not in
interpreter['properties']['phoenix.url']: KeyError: 'phoenix.url' (Prabhjyot
Singh via Venkata Sairam)
Posted by rl...@apache.org.
AMBARI-21473. Zeppelin does not start and returns: params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']: KeyError: 'phoenix.url' (Prabhjyot Singh via Venkata Sairam)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/56f05f09
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/56f05f09
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/56f05f09
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 56f05f099aae18d11f849266d14bccf36ae79ad0
Parents: 8de6517
Author: Venkata Sairam <ve...@gmail.com>
Authored: Wed Jul 19 14:27:31 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Wed Jul 19 14:27:31 2017 +0530
----------------------------------------------------------------------
.../common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/56f05f09/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index 0013ab0..ba46dc8 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -295,6 +295,7 @@ class Master(Script):
interpreter['properties']['zeppelin.jdbc.keytab.location'] = params.zeppelin_kerberos_keytab
if params.zookeeper_znode_parent \
and params.hbase_zookeeper_quorum \
+ and 'phoenix.url' in interpreter['properties'] \
and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']:
interpreter['properties']['phoenix.url'] = "jdbc:phoenix:" + \
params.hbase_zookeeper_quorum + ':' + \
[19/21] ambari git commit: AMBARI-21512. Stack Advisor reported an
error: KeyError: stack_name while Issued INSTALLED as new state for
NODEMANAGER (smohanty)
Posted by rl...@apache.org.
AMBARI-21512. Stack Advisor reported an error: KeyError: stack_name while Issued INSTALLED as new state for NODEMANAGER (smohanty)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8de65173
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8de65173
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8de65173
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 8de651738a8338e4744b0a2a661577b08ae01778
Parents: 016df4e
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jul 18 22:05:46 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jul 18 22:07:22 2017 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/ambari/server/agent/hello.rs | 0
ambari-server/src/main/resources/stacks/stack_advisor.py | 7 ++++---
2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de65173/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs b/ambari-server/src/main/java/org/apache/ambari/server/agent/hello.rs
new file mode 100644
index 0000000..e69de29
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de65173/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 67f7fe0..321ac4e 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2017,9 +2017,10 @@ class DefaultStackAdvisor(StackAdvisor):
if cluster_env and "stack_root" in cluster_env:
stack_root_as_str = cluster_env["stack_root"]
stack_roots = json.loads(stack_root_as_str)
- stack_name = cluster_env["stack_name"]
- if stack_name in stack_roots:
- stack_root = stack_roots[stack_name]
+ if "stack_name" in cluster_env:
+ stack_name = cluster_env["stack_name"]
+ if stack_name in stack_roots:
+ stack_root = stack_roots[stack_name]
return stack_root
[21/21] ambari git commit: Merge branch 'trunk' into
branch-feature-AMBARI-20859
Posted by rl...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-20859
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/903cd1a0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/903cd1a0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/903cd1a0
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 903cd1a06c9016efeb3be29c7a323746960ab2e5
Parents: 3adbbad 56f05f0
Author: Robert Levas <rl...@hortonworks.com>
Authored: Wed Jul 19 08:38:25 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Wed Jul 19 08:38:25 2017 -0400
----------------------------------------------------------------------
.../controllers/groups/GroupsEditCtrl.js | 3 +
.../stackVersions/StackVersionsCreateCtrl.js | 3 +-
.../libraries/functions/get_stack_version.py | 4 +-
ambari-infra/ambari-infra-solr-client/build.xml | 1 +
ambari-infra/ambari-infra-solr-client/pom.xml | 10 +
.../src/main/resources/solrIndexHelper.sh | 156 ++++
.../kafka/KafkaTimelineMetricsReporter.java | 6 +-
ambari-server/pom.xml | 2 +-
ambari-server/sbin/ambari-server | 6 +-
.../org/apache/ambari/server/agent/hello.rs | 0
.../AmbariCustomCommandExecutionHelper.java | 21 +-
.../AmbariManagementControllerImpl.java | 124 ++-
.../BlueprintConfigurationProcessor.java | 64 +-
.../internal/ClientConfigResourceProvider.java | 3 +-
.../internal/HostResourceProvider.java | 1 +
.../server/controller/internal/Stack.java | 2 +-
.../server/controller/internal/UnitUpdater.java | 150 ++++
.../LdapToPamMigrationHelper.java | 73 ++
.../ambari/server/state/ConfigHelper.java | 17 +
.../ambari/server/state/PropertyInfo.java | 2 +
.../KerberosDescriptorUpdateHelper.java | 9 +-
.../validators/TopologyValidatorFactory.java | 2 +-
.../validators/UnitValidatedProperty.java | 95 +++
.../topology/validators/UnitValidator.java | 79 ++
ambari-server/src/main/python/ambari-server.py | 14 +-
.../main/python/ambari_server/setupActions.py | 1 +
.../main/python/ambari_server/setupSecurity.py | 119 ++-
.../HDFS/2.1.0.2.0/configuration/hdfs-site.xml | 6 +
.../HDFS/2.1.0.2.0/package/scripts/hdfs.py | 8 +
.../2.1.0.2.0/package/scripts/hdfs_namenode.py | 25 +-
.../HDFS/2.1.0.2.0/package/scripts/namenode.py | 4 +
.../2.1.0.2.0/package/scripts/params_linux.py | 9 +-
.../2.1.0.2.0/package/scripts/params_windows.py | 7 +
.../package/templates/include_hosts_list.j2 | 21 +
.../HDFS/3.0.0.3.0/package/scripts/hdfs.py | 8 +
.../3.0.0.3.0/package/scripts/hdfs_namenode.py | 17 +-
.../3.0.0.3.0/package/scripts/params_linux.py | 8 +-
.../3.0.0.3.0/package/scripts/params_windows.py | 7 +
.../package/templates/include_hosts_list.j2 | 21 +
.../KAFKA/0.10.0.3.0/metainfo.xml | 1 +
.../KAFKA/0.10.0.3.0/package/scripts/kafka.py | 10 +
.../KAFKA/0.10.0.3.0/package/scripts/params.py | 3 +
.../common-services/KAFKA/0.10.0/metainfo.xml | 1 +
.../KAFKA/0.8.1/package/scripts/kafka.py | 12 +
.../KAFKA/0.8.1/package/scripts/params.py | 3 +
.../KAFKA/0.9.0/configuration/kafka-broker.xml | 2 +-
.../YARN/2.1.0.2.0/configuration/yarn-site.xml | 6 +
.../2.1.0.2.0/package/scripts/params_linux.py | 12 +-
.../2.1.0.2.0/package/scripts/params_windows.py | 10 +-
.../package/scripts/resourcemanager.py | 18 +-
.../YARN/2.1.0.2.0/package/scripts/service.py | 4 +
.../package/templates/include_hosts_list.j2 | 21 +
.../YARN/3.0.0.3.0/configuration/yarn-site.xml | 6 +
.../3.0.0.3.0/package/scripts/params_linux.py | 11 +-
.../3.0.0.3.0/package/scripts/params_windows.py | 10 +-
.../package/scripts/resourcemanager.py | 18 +-
.../package/templates/include_hosts_list.j2 | 21 +
.../YARN/3.0.0.3.0/service_advisor.py | 7 +-
.../0.6.0.2.5/package/scripts/master.py | 1 +
.../services/HDFS/configuration/hdfs-site.xml | 6 +
.../HDFS/package/scripts/hdfs_namenode.py | 12 +-
.../0.8/services/HDFS/package/scripts/params.py | 11 +-
.../package/templates/include_hosts_list.j2 | 21 +
.../services/YARN/configuration/yarn-site.xml | 6 +
.../0.8/services/YARN/package/scripts/params.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/include_hosts_list.j2 | 21 +
.../services/YARN/configuration/yarn-site.xml | 6 +
.../services/YARN/package/scripts/params.py | 9 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/exclude_hosts_list.j2 | 21 +
.../package/templates/include_hosts_list.j2 | 21 +
.../hooks/before-ANY/files/changeToSecureUid.sh | 13 +-
.../before-ANY/scripts/shared_initialization.py | 45 +-
.../services/YARN/configuration/yarn-site.xml | 6 +
.../services/YARN/package/scripts/params.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 9 +-
.../package/templates/include_hosts_list.j2 | 21 +
.../stacks/HDP/2.5/services/stack_advisor.py | 13 +-
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 4 +
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 6 +-
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 1 +
.../configuration/application-properties.xml | 17 +
.../stacks/HDP/2.6/services/HIVE/kerberos.json | 151 ----
.../stacks/HDP/2.6/services/YARN/kerberos.json | 2 +-
.../stacks/HDP/2.6/upgrades/config-upgrade.xml | 4 +
.../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml | 4 +
.../stacks/HDP/2.6/upgrades/upgrade-2.6.xml | 1 +
.../HDP/3.0/configuration/cluster-env.xml | 4 +-
.../HDP/3.0/properties/stack_features.json | 752 ++++++++++---------
.../stacks/HDP/3.0/properties/stack_tools.json | 14 +-
.../src/main/resources/stacks/stack_advisor.py | 7 +-
.../AmbariManagementControllerImplTest.java | 12 +-
.../AmbariManagementControllerTest.java | 8 +-
.../controller/internal/UnitUpdaterTest.java | 114 +++
.../ambari/server/state/ConfigHelperTest.java | 22 +
.../KerberosDescriptorUpdateHelperTest.java | 70 ++
.../topology/validators/UnitValidatorTest.java | 114 +++
.../python/stacks/2.0.6/HDFS/test_namenode.py | 2 +-
.../stacks/2.0.6/configs/altfs_plus_hdfs.json | 2 +-
.../python/stacks/2.0.6/configs/default.json | 2 +-
.../2.0.6/configs/default_ams_embedded.json | 2 +-
.../2.0.6/configs/default_hive_nn_ha.json | 2 +-
.../2.0.6/configs/default_hive_nn_ha_2.json | 2 +-
.../2.0.6/configs/default_hive_non_hdfs.json | 2 +-
.../2.0.6/configs/default_no_install.json | 2 +-
.../2.0.6/configs/default_oozie_mysql.json | 2 +-
.../default_update_exclude_file_only.json | 2 +-
.../2.0.6/configs/default_with_bucket.json | 2 +-
.../python/stacks/2.0.6/configs/flume_22.json | 2 +-
.../python/stacks/2.0.6/configs/flume_only.json | 2 +-
.../stacks/2.0.6/configs/hbase_no_phx.json | 2 +-
.../stacks/2.0.6/configs/hbase_with_phx.json | 2 +-
.../2.0.6/configs/oozie_existing_sqla.json | 2 +-
.../stacks/2.0.6/configs/repository_file.json | 2 +-
.../python/stacks/2.0.6/configs/secured.json | 2 +-
.../2.0.6/hooks/before-ANY/test_before_any.py | 294 +++++---
.../test/python/stacks/2.3/configs/ats_1_5.json | 2 +-
.../stacks/2.5/common/test_stack_advisor.py | 4 +-
.../python/stacks/2.5/configs/hsi_default.json | 2 +-
.../2.5/configs/hsi_default_for_restart.json | 2 +-
.../test/python/stacks/2.5/configs/hsi_ha.json | 2 +-
.../app/controllers/wizard/step7_controller.js | 67 ++
.../configs/stack_config_properties_mapper.js | 14 +-
ambari-web/app/styles/application.less | 15 +
...ontrols_service_config_usergroup_with_id.hbs | 27 +
ambari-web/app/utils/ajax/ajax.js | 2 +-
ambari-web/app/utils/config.js | 3 +
.../configs/service_configs_by_category_view.js | 6 +
ambari-web/app/views/common/controls_view.js | 39 +
.../services/YARN/configuration/yarn-site.xml | 6 +
.../YARN/package/scripts/params_linux.py | 9 +-
.../YARN/package/scripts/params_windows.py | 10 +-
.../YARN/package/scripts/resourcemanager.py | 18 +-
.../package/templates/include_hosts_list.j2 | 21 +
135 files changed, 2590 insertions(+), 860 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/903cd1a0/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/903cd1a0/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
[03/21] ambari git commit: AMBARI-21470 : Kafka Sink does not exclude
excluded metrics of type 'gauge'. (Qin Liu via avijayan)
Posted by rl...@apache.org.
AMBARI-21470 : Kafka Sink does not exclude excluded metrics of type 'gauge'. (Qin Liu via avijayan)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9bfea653
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9bfea653
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9bfea653
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 9bfea6530bc9759f518fd616e15bb08244152ab1
Parents: f7fac03
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Fri Jul 14 09:47:47 2017 -0700
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Fri Jul 14 09:47:47 2017 -0700
----------------------------------------------------------------------
.../metrics2/sink/kafka/KafkaTimelineMetricsReporter.java | 6 ++++--
.../common-services/KAFKA/0.9.0/configuration/kafka-broker.xml | 2 +-
2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/9bfea653/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 6f5e9e0..e126016 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -406,8 +406,10 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
final String sanitizedName = sanitizeName(name);
try {
- cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
- populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+ if (!isExcludedMetric(sanitizedName)) {
+ cacheSanitizedTimelineMetric(currentTimeMillis, sanitizedName, "", Double.parseDouble(String.valueOf(gauge.value())));
+ populateMetricsList(context, MetricType.GAUGE, sanitizedName);
+ }
} catch (NumberFormatException ex) {
LOG.debug(ex.getMessage());
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/9bfea653/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
index 73a5eff..4cd2b0d 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/kafka-broker.xml
@@ -138,7 +138,7 @@
</property>
<property>
<name>external.kafka.metrics.exclude.prefix</name>
- <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec</value>
+ <value>kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory,kafka.server.BrokerTopicMetrics.BytesRejectedPerSec,kafka.server.KafkaServer.ClusterId</value>
<description>
Exclude metrics starting with these prefixes from being collected.
</description>
[16/21] ambari git commit: AMBARI-21501. Make HSI's
'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.
Posted by rl...@apache.org.
AMBARI-21501. Make HSI's 'hive.llap.zk.sm.keytab' and 'hive.service.keytab' group readable.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f450eba5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f450eba5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f450eba5
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f450eba5c23c0d35ab9181d531d9e1ef84cbf3e8
Parents: 01d60f4
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Jul 17 15:04:37 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Jul 17 15:04:37 2017 -0700
----------------------------------------------------------------------
.../stacks/HDP/2.6/services/HIVE/kerberos.json | 151 -------------------
.../stacks/HDP/2.6/services/YARN/kerberos.json | 2 +-
2 files changed, 1 insertion(+), 152 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/f450eba5/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
deleted file mode 100644
index b6e57e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
+++ /dev/null
@@ -1,151 +0,0 @@
-{
- "services": [
- {
- "name": "HIVE",
- "identities": [
- {
- "name": "/spnego"
- },
- {
- "name": "/smokeuser"
- }
- ],
- "configurations": [
- {
- "hive-site": {
- "hive.metastore.sasl.enabled": "true",
- "hive.server2.authentication": "KERBEROS"
- }
- },
- {
- "ranger-hive-audit": {
- "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
- "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
- "xasecure.audit.jaas.Client.option.useKeyTab": "true",
- "xasecure.audit.jaas.Client.option.storeKey": "false",
- "xasecure.audit.jaas.Client.option.serviceName": "solr",
- "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
- }
- }
- ],
- "components": [
- {
- "name": "HIVE_METASTORE",
- "identities": [
- {
- "name": "/HIVE/HIVE_SERVER/hive_server_hive",
- "principal": {
- "configuration": "hive-site/hive.metastore.kerberos.principal"
- },
- "keytab": {
- "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
- }
- }
- ]
- },
- {
- "name": "HIVE_SERVER",
- "identities": [
- {
- "name": "/HDFS/NAMENODE/hdfs"
- },
- {
- "name": "hive_server_hive",
- "principal": {
- "value": "hive/_HOST@${realm}",
- "type": "service",
- "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
- "local_username": "${hive-env/hive_user}"
- },
- "keytab": {
- "file": "${keytab_dir}/hive.service.keytab",
- "owner": {
- "name": "${hive-env/hive_user}",
- "access": "r"
- },
- "group": {
- "name": "${cluster-env/user_group}",
- "access": ""
- },
- "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
- }
- },
- {
- "name": "atlas_kafka",
- "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
- "principal": {
- "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
- },
- "keytab": {
- "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
- }
- },
- {
- "name": "/spnego",
- "principal": {
- "configuration": "hive-site/hive.server2.authentication.spnego.principal"
- },
- "keytab": {
- "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
- }
- },
- {
- "name": "ranger_audit",
- "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
- "principal": {
- "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
- },
- "keytab": {
- "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
- }
- }
- ]
- },
- {
- "name": "HIVE_SERVER_INTERACTIVE",
- "identities": [
- {
- "name": "/HDFS/NAMENODE/hdfs"
- },
- {
- "name": "/HIVE/HIVE_SERVER/hive_server_hive"
- },
- {
- "name": "/HIVE/HIVE_SERVER/spnego"
- },
- {
- "name": "/YARN/NODEMANAGER/llap_zk_hive"
- }
- ]
- },
- {
- "name": "WEBHCAT_SERVER",
- "identities": [
- {
- "name": "/spnego",
- "principal": {
- "configuration": "webhcat-site/templeton.kerberos.principal"
- },
- "keytab": {
- "configuration": "webhcat-site/templeton.kerberos.keytab"
- }
- }
- ],
- "configurations": [
- {
- "core-site": {
- "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
- }
- },
- {
- "webhcat-site": {
- "templeton.kerberos.secret": "secret",
- "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
- }
- }
- ]
- }
- ]
- }
- ]
-}
http://git-wip-us.apache.org/repos/asf/ambari/blob/f450eba5/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index 60d50eb..b1501b8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
},
"group": {
"name": "${cluster-env/user_group}",
- "access": ""
+ "access": "r"
},
"configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
},
[04/21] ambari git commit: AMBARI-21479 Deploys failing with Namenode
install failure (dgrinenko)
Posted by rl...@apache.org.
AMBARI-21479 Deploys failing with Namenode install failure (dgrinenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4e1da58a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4e1da58a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4e1da58a
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 4e1da58a9479889f1624e9887bb681a54e2ae6ae
Parents: 9bfea65
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Fri Jul 14 20:26:52 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Fri Jul 14 20:26:52 2017 +0300
----------------------------------------------------------------------
.../HDP/3.0/configuration/cluster-env.xml | 4 +-
.../HDP/3.0/properties/stack_features.json | 752 ++++++++++---------
.../stacks/HDP/3.0/properties/stack_tools.json | 14 +-
3 files changed, 391 insertions(+), 379 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
index 341079b..ca3be1d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -252,8 +252,8 @@ gpgcheck=0</value>
</property>
<property>
<name>stack_root</name>
- <value>/usr/hdp</value>
- <description>Stack root folder</description>
+ <value>{"HDP":"/usr/hdp"}</value>
+ <description>JSON which defines the stack root by stack name</description>
<value-attributes>
<read-only>true</read-only>
<overridable>false</overridable>
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
index b081ee1..9422cbc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -1,377 +1,379 @@
{
- "stack_features": [
- {
- "name": "snappy",
- "description": "Snappy compressor/decompressor support",
- "min_version": "2.0.0.0",
- "max_version": "2.2.0.0"
- },
- {
- "name": "lzo",
- "description": "LZO libraries support",
- "min_version": "2.2.1.0"
- },
- {
- "name": "express_upgrade",
- "description": "Express upgrade support",
- "min_version": "2.1.0.0"
- },
- {
- "name": "rolling_upgrade",
- "description": "Rolling upgrade support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "kafka_acl_migration_support",
- "description": "ACL migration support",
- "min_version": "2.3.4.0"
- },
- {
- "name": "secure_zookeeper",
- "description": "Protect ZNodes with SASL acl in secure clusters",
- "min_version": "2.6.0.0"
- },
- {
- "name": "config_versioning",
- "description": "Configurable versions support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "datanode_non_root",
- "description": "DataNode running as non-root support (AMBARI-7615)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "remove_ranger_hdfs_plugin_env",
- "description": "HDFS removes Ranger env files (AMBARI-14299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger",
- "description": "Ranger Service support",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_tagsync_component",
- "description": "Ranger Tagsync component support (AMBARI-14383)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix",
- "description": "Phoenix Service support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "nfs",
- "description": "NFS support",
- "min_version": "2.3.0.0"
- },
- {
- "name": "tez_for_spark",
- "description": "Tez dependency for Spark",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "timeline_state_store",
- "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "copy_tarball_to_hdfs",
- "description": "Copy tarball to HDFS support (AMBARI-12113)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "spark_16plus",
- "description": "Spark 1.6+",
- "min_version": "2.4.0.0"
- },
- {
- "name": "spark_thriftserver",
- "description": "Spark Thrift Server",
- "min_version": "2.3.2.0"
- },
- {
- "name": "storm_kerberos",
- "description": "Storm Kerberos support (AMBARI-7570)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "storm_ams",
- "description": "Storm AMS integration (AMBARI-10710)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "create_kafka_broker_id",
- "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
- "min_version": "2.2.0.0",
- "max_version": "2.3.0.0"
- },
- {
- "name": "kafka_listeners",
- "description": "Kafka listeners (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_kerberos",
- "description": "Kafka Kerberos support (AMBARI-10984)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "pig_on_tez",
- "description": "Pig on Tez support (AMBARI-7863)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_usersync_non_root",
- "description": "Ranger Usersync as non-root user (AMBARI-10416)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_audit_db_support",
- "description": "Ranger Audit to DB support",
- "min_version": "2.2.0.0",
- "max_version": "2.5.0.0"
- },
- {
- "name": "accumulo_kerberos_user_auth",
- "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "knox_versioned_data_dir",
- "description": "Use versioned data dir for Knox (AMBARI-13164)",
- "min_version": "2.3.2.0"
- },
- {
- "name": "knox_sso_topology",
- "description": "Knox SSO Topology support (AMBARI-13975)",
- "min_version": "2.3.8.0"
- },
- {
- "name": "atlas_rolling_upgrade",
- "description": "Rolling upgrade support for Atlas",
- "min_version": "2.3.0.0"
- },
- {
- "name": "oozie_admin_user",
- "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_create_hive_tez_configs",
- "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_setup_shared_lib",
- "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "oozie_host_kerberos",
- "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
- "min_version": "2.0.0.0"
- },
- {
- "name": "falcon_extensions",
- "description": "Falcon Extension",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_upgrade_schema",
- "description": "Hive metastore upgrade schema support (AMBARI-11176)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server_interactive",
- "description": "Hive server interactive support (AMBARI-15573)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_webhcat_specific_configs",
- "description": "Hive webhcat specific configurations support (AMBARI-12364)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_purge_table",
- "description": "Hive purge table support (AMBARI-12260)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "hive_server2_kerberized_env",
- "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
- "min_version": "2.2.3.0",
- "max_version": "2.2.5.0"
- },
- {
- "name": "hive_env_heapsize",
- "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
- "min_version": "2.2.0.0"
- },
- {
- "name": "ranger_kms_hsm_support",
- "description": "Ranger KMS HSM support (AMBARI-15752)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_log4j_support",
- "description": "Ranger supporting log-4j properties (AMBARI-15681)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kerberos_support",
- "description": "Ranger Kerberos support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hive_metastore_site_support",
- "description": "Hive Metastore site support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_usersync_password_jceks",
- "description": "Saving Ranger Usersync credentials in jceks",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_install_infra_client",
- "description": "Ambari Infra Service support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "falcon_atlas_support_2_3",
- "description": "Falcon Atlas integration support for 2.3 stack",
- "min_version": "2.3.99.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "falcon_atlas_support",
- "description": "Falcon Atlas integration",
- "min_version": "2.5.0.0"
- },
- {
- "name": "hbase_home_directory",
- "description": "Hbase home directory in HDFS needed for HBASE backup",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_livy",
- "description": "Livy as slave component of spark",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_ranger_plugin_support",
- "description": "Atlas Ranger plugin support",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_conf_dir_in_path",
- "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
- "min_version": "2.3.0.0",
- "max_version": "2.4.99.99"
- },
- {
- "name": "atlas_upgrade_support",
- "description": "Atlas supports express and rolling upgrades",
- "min_version": "2.5.0.0"
- },
- {
- "name": "atlas_hook_support",
- "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_pid_support",
- "description": "Ranger Service support pid generation AMBARI-16756",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_kms_pid_support",
- "description": "Ranger KMS Service support pid generation",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_admin_password_change",
- "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "storm_metrics_apache_classes",
- "description": "Metrics sink for Storm that uses Apache class names",
- "min_version": "2.5.0.0"
- },
- {
- "name": "spark_java_opts_support",
- "description": "Allow Spark to generate java-opts file",
- "min_version": "2.2.0.0",
- "max_version": "2.4.0.0"
- },
- {
- "name": "atlas_hbase_setup",
- "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
- "min_version": "2.5.0.0"
- },
- {
- "name": "ranger_hive_plugin_jdbc_url",
- "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
- "min_version": "2.5.0.0"
- },
- {
- "name": "zkfc_version_advertised",
- "description": "ZKFC advertise version",
- "min_version": "2.5.0.0"
- },
- {
- "name": "phoenix_core_hdfs_site_required",
- "description": "HDFS and CORE site required for Phoenix",
- "max_version": "2.5.9.9"
- },
- {
- "name": "ranger_tagsync_ssl_xml_support",
- "description": "Ranger Tagsync ssl xml support.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_xml_configuration",
- "description": "Ranger code base support xml configurations",
- "min_version": "2.3.0.0"
- },
- {
- "name": "kafka_ranger_plugin_support",
- "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "yarn_ranger_plugin_support",
- "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
- "min_version": "2.3.0.0"
- },
- {
- "name": "ranger_solr_config_support",
- "description": "Showing Ranger solrconfig.xml on UI",
- "min_version": "2.6.0.0"
- },
- {
- "name": "core_site_for_ranger_plugins",
- "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
- "min_version": "2.6.0.0"
- },
- {
- "name": "secure_ranger_ssl_password",
- "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
- "min_version": "2.6.0.0"
- },
- {
- "name": "ranger_kms_ssl",
- "description": "Ranger KMS SSL properties in ambari stack",
- "min_version": "2.6.0.0"
- },
- {
- "name": "atlas_hdfs_site_on_namenode_ha",
- "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
- "min_version": "2.6.0.0"
- }
- ]
+ "HDP": {
+ "stack_features": [
+ {
+ "name": "snappy",
+ "description": "Snappy compressor/decompressor support",
+ "min_version": "2.0.0.0",
+ "max_version": "2.2.0.0"
+ },
+ {
+ "name": "lzo",
+ "description": "LZO libraries support",
+ "min_version": "2.2.1.0"
+ },
+ {
+ "name": "express_upgrade",
+ "description": "Express upgrade support",
+ "min_version": "2.1.0.0"
+ },
+ {
+ "name": "rolling_upgrade",
+ "description": "Rolling upgrade support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "kafka_acl_migration_support",
+ "description": "ACL migration support",
+ "min_version": "2.3.4.0"
+ },
+ {
+ "name": "secure_zookeeper",
+ "description": "Protect ZNodes with SASL acl in secure clusters",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "config_versioning",
+ "description": "Configurable versions support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "datanode_non_root",
+ "description": "DataNode running as non-root support (AMBARI-7615)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "remove_ranger_hdfs_plugin_env",
+ "description": "HDFS removes Ranger env files (AMBARI-14299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger",
+ "description": "Ranger Service support",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_tagsync_component",
+ "description": "Ranger Tagsync component support (AMBARI-14383)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix",
+ "description": "Phoenix Service support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "nfs",
+ "description": "NFS support",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "tez_for_spark",
+ "description": "Tez dependency for Spark",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "timeline_state_store",
+ "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "copy_tarball_to_hdfs",
+ "description": "Copy tarball to HDFS support (AMBARI-12113)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "spark_16plus",
+ "description": "Spark 1.6+",
+ "min_version": "2.4.0.0"
+ },
+ {
+ "name": "spark_thriftserver",
+ "description": "Spark Thrift Server",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "storm_kerberos",
+ "description": "Storm Kerberos support (AMBARI-7570)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "storm_ams",
+ "description": "Storm AMS integration (AMBARI-10710)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "create_kafka_broker_id",
+ "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+ "min_version": "2.2.0.0",
+ "max_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_listeners",
+ "description": "Kafka listeners (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_kerberos",
+ "description": "Kafka Kerberos support (AMBARI-10984)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "pig_on_tez",
+ "description": "Pig on Tez support (AMBARI-7863)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_usersync_non_root",
+ "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_audit_db_support",
+ "description": "Ranger Audit to DB support",
+ "min_version": "2.2.0.0",
+ "max_version": "2.5.0.0"
+ },
+ {
+ "name": "accumulo_kerberos_user_auth",
+ "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "knox_versioned_data_dir",
+ "description": "Use versioned data dir for Knox (AMBARI-13164)",
+ "min_version": "2.3.2.0"
+ },
+ {
+ "name": "knox_sso_topology",
+ "description": "Knox SSO Topology support (AMBARI-13975)",
+ "min_version": "2.3.8.0"
+ },
+ {
+ "name": "atlas_rolling_upgrade",
+ "description": "Rolling upgrade support for Atlas",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "oozie_admin_user",
+ "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_create_hive_tez_configs",
+ "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_setup_shared_lib",
+ "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "oozie_host_kerberos",
+ "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+ "min_version": "2.0.0.0"
+ },
+ {
+ "name": "falcon_extensions",
+ "description": "Falcon Extension",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_upgrade_schema",
+ "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server_interactive",
+ "description": "Hive server interactive support (AMBARI-15573)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_webhcat_specific_configs",
+ "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_purge_table",
+ "description": "Hive purge table support (AMBARI-12260)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "hive_server2_kerberized_env",
+ "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+ "min_version": "2.2.3.0",
+ "max_version": "2.2.5.0"
+ },
+ {
+ "name": "hive_env_heapsize",
+ "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+ "min_version": "2.2.0.0"
+ },
+ {
+ "name": "ranger_kms_hsm_support",
+ "description": "Ranger KMS HSM support (AMBARI-15752)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_log4j_support",
+ "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kerberos_support",
+ "description": "Ranger Kerberos support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hive_metastore_site_support",
+ "description": "Hive Metastore site support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_usersync_password_jceks",
+ "description": "Saving Ranger Usersync credentials in jceks",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_install_infra_client",
+ "description": "Ambari Infra Service support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "falcon_atlas_support_2_3",
+ "description": "Falcon Atlas integration support for 2.3 stack",
+ "min_version": "2.3.99.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "falcon_atlas_support",
+ "description": "Falcon Atlas integration",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "hbase_home_directory",
+ "description": "Hbase home directory in HDFS needed for HBASE backup",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_livy",
+ "description": "Livy as slave component of spark",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_ranger_plugin_support",
+ "description": "Atlas Ranger plugin support",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_conf_dir_in_path",
+ "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+ "min_version": "2.3.0.0",
+ "max_version": "2.4.99.99"
+ },
+ {
+ "name": "atlas_upgrade_support",
+ "description": "Atlas supports express and rolling upgrades",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "atlas_hook_support",
+ "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_pid_support",
+ "description": "Ranger Service support pid generation AMBARI-16756",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_kms_pid_support",
+ "description": "Ranger KMS Service support pid generation",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_admin_password_change",
+ "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "storm_metrics_apache_classes",
+ "description": "Metrics sink for Storm that uses Apache class names",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "spark_java_opts_support",
+ "description": "Allow Spark to generate java-opts file",
+ "min_version": "2.2.0.0",
+ "max_version": "2.4.0.0"
+ },
+ {
+ "name": "atlas_hbase_setup",
+ "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "ranger_hive_plugin_jdbc_url",
+ "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "zkfc_version_advertised",
+ "description": "ZKFC advertise version",
+ "min_version": "2.5.0.0"
+ },
+ {
+ "name": "phoenix_core_hdfs_site_required",
+ "description": "HDFS and CORE site required for Phoenix",
+ "max_version": "2.5.9.9"
+ },
+ {
+ "name": "ranger_tagsync_ssl_xml_support",
+ "description": "Ranger Tagsync ssl xml support.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_xml_configuration",
+ "description": "Ranger code base support xml configurations",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "kafka_ranger_plugin_support",
+ "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "yarn_ranger_plugin_support",
+ "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+ "min_version": "2.3.0.0"
+ },
+ {
+ "name": "ranger_solr_config_support",
+ "description": "Showing Ranger solrconfig.xml on UI",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "core_site_for_ranger_plugins",
+ "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "secure_ranger_ssl_password",
+ "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "ranger_kms_ssl",
+ "description": "Ranger KMS SSL properties in ambari stack",
+ "min_version": "2.6.0.0"
+ },
+ {
+ "name": "atlas_hdfs_site_on_namenode_ha",
+ "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+ "min_version": "2.6.0.0"
+ }
+ ]
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/4e1da58a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
index d1aab4b..e1a65c2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
@@ -1,4 +1,14 @@
{
- "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
- "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+ "HDP": {
+ "stack_selector": [
+ "hdp-select",
+ "/usr/bin/hdp-select",
+ "hdp-select"
+ ],
+ "conf_selector": [
+ "conf-select",
+ "/usr/bin/conf-select",
+ "conf-select"
+ ]
+ }
}
\ No newline at end of file
[14/21] ambari git commit: AMBARI-21488 Default Base URL should be
there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS _TYPE to
redhat7-ppc64. (atkach)
Posted by rl...@apache.org.
AMBARI-21488 Default Base URL should be there for OS_TYPE=redhat-ppc6 in IBM Power and change the OS _TYPE to redhat7-ppc64. (atkach)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ba2a29fd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ba2a29fd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ba2a29fd
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: ba2a29fd1da5aa264e7873e306c9320e0d3b2a45
Parents: cc412e6
Author: Andrii Tkach <at...@apache.org>
Authored: Mon Jul 17 15:20:36 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Mon Jul 17 16:05:39 2017 +0300
----------------------------------------------------------------------
.../scripts/controllers/stackVersions/StackVersionsCreateCtrl.js | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/ba2a29fd/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 70f6658..9d17075 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -195,8 +195,7 @@ angular.module('ambariAdminConsole')
if (!existingOSHash[stackOs.OperatingSystems.os_type]) {
stackOs.selected = false;
stackOs.repositories.forEach(function(repo) {
- repo.Repositories.base_url = '';
- repo.Repositories.initial_base_url = '';
+ repo.Repositories.initial_base_url = repo.Repositories.default_base_url;
});
$scope.osList.push(stackOs);
}
[12/21] ambari git commit: AMBARI-21345 Add host doesn't fully add a
node when include/exclude files are used (dsen)
Posted by rl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index 2ea07e4..f50a207 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -33,7 +33,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "true",
+ "update_files_only" : "true",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
index 5080d30..c1eb868 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
@@ -38,7 +38,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
index 6ec9ec9..c99d10b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json
@@ -32,7 +32,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"output_file":"HDFS_CLIENT-configs.tar.gz"
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
index 1550715..1a4d676 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_only.json
@@ -31,7 +31,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index f572413..52a1fde 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -35,7 +35,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index 9979e9d..7283bf5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -47,7 +47,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 8d12b98..4ffa29f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -34,7 +34,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json b/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
index 1554f1b..7efb7d9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/repository_file.json
@@ -77,7 +77,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 5327865..76a110e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -38,7 +38,7 @@
"script": "scripts/yarn_client.py",
"excluded_hosts": "host1",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false"
+ "update_files_only" : "false"
},
"taskId": 186,
"public_hostname": "c6401.ambari.apache.org",
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
index b4342ad..475a6f9 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/ats_1_5.json
@@ -31,7 +31,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
index 9dcb451..7622212 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
@@ -38,7 +38,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
index f6de1c4..c2320ba 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default_for_restart.json
@@ -39,7 +39,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
index 3fd9f72..7b79d84 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
@@ -38,7 +38,7 @@
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2",
"mark_draining_only" : "false",
- "update_exclude_file_only" : "false",
+ "update_files_only" : "false",
"xml_configs_list":[{"hdfs-site.xml":"hdfs-site"}],
"env_configs_list":[{"hadoop-env.sh":"hadoop-env"},{"log4j.properties":"hdfs-log4j,yarn-log4j"}],
"properties_configs_list":[{"runtime.properties":"falcon-runtime.properties"},{"startup.properties":"falcon-startup.properties"}],
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
index 59ff82b..f3ea462 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/configuration/yarn-site.xml
@@ -388,6 +388,12 @@
<on-ambari-upgrade add="true"/>
</property>
<property>
+ <name>manage.include.files</name>
+ <value>false</value>
+ <description>If true, Ambari will manage the include file if yarn.resourcemanager.nodes.include-path is configured.</description>
+ <on-ambari-upgrade add="false"/>
+ </property>
+ <property>
<name>yarn.http.policy</name>
<value>HTTP_ONLY</value>
<description>
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
index 4d42861..da54b7c 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_linux.py
@@ -233,6 +233,13 @@ has_ats = not len(ats_host) == 0
nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+
# don't using len(nm_hosts) here, because check can take too much time on large clusters
number_of_nm = 1
@@ -315,7 +322,7 @@ HdfsResource = functools.partial(
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+update_files_only = default("/commandParams/update_files_only",False)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
index 0f8ce73..cac93ee 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/params_windows.py
@@ -56,4 +56,12 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_exclude_file_only = config['commandParams']['update_exclude_file_only']
+update_files_only = default("/commandParams/update_files_only", False)
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+#include file
+include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
+include_hosts = None
+manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+ include_hosts = list(set(nm_hosts) - set(exclude_hosts))
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
index 6a7eea7..71c7bc1 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
@@ -91,7 +91,14 @@ class ResourcemanagerWindows(Resourcemanager):
mode="f"
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ mode="f"
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd, user=yarn_user)
@@ -219,7 +226,14 @@ class ResourcemanagerDefault(Resourcemanager):
group=user_group
)
- if params.update_exclude_file_only == False:
+ if params.include_hosts:
+ File(params.include_file_path,
+ content=Template("include_hosts_list.j2"),
+ owner=yarn_user,
+ group=user_group
+ )
+
+ if params.update_files_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
http://git-wip-us.apache.org/repos/asf/ambari/blob/cc412e66/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2 b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
new file mode 100644
index 0000000..42e33c0
--- /dev/null
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in include_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file
[07/21] ambari git commit: AMBARI-21482. Blueprints: HSI config
'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated
and recommended via Stack Advisor during Blueprint install only if
'num_llap_nodes' config value is not provided in Blueprint.
Posted by rl...@apache.org.
AMBARI-21482. Blueprints: HSI config 'num_llap_nodes' and 'num_llap_nodes_for_llap_daemons' should be calculated and recommended via Stack Advisor during Blueprint install only if 'num_llap_nodes' config value is not provided in Blueprint.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e799f522
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e799f522
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e799f522
Branch: refs/heads/branch-feature-AMBARI-20859
Commit: e799f52268db9330a84b1e982b3b88e591b04649
Parents: c7f4228
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Fri Jul 14 18:15:52 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Fri Jul 14 22:27:56 2017 -0700
----------------------------------------------------------------------
.../main/resources/stacks/HDP/2.5/services/stack_advisor.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/e799f522/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 4ca74ee..1c19d8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1013,8 +1013,10 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
# Set 'num_llap_nodes_requested' for 1st invocation, as it gets passed as 1 otherwise, read from config.
# Check if its : 1. 1st invocation from UI ('enable_hive_interactive' in changed-configurations)
- # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case)
- if (changed_configs_has_enable_hive_int or 0 == len(services['changed-configurations'])) \
+ # OR 2. 1st invocation from BP (services['changed-configurations'] should be empty in this case and 'num_llap_nodes' not defined)
+ if (changed_configs_has_enable_hive_int
+ or (0 == len(services['changed-configurations'])
+ and not services['configurations']['hive-interactive-env']['properties']['num_llap_nodes'])) \
and services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']:
num_llap_nodes_requested = min_nodes_required
else: