You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2017/02/24 14:19:27 UTC
[20/50] ambari git commit: AMBARI-20062. StackAdvisor reports error for missing YARN, MR, Hive queues while adding services post upgrade (dgrinenko via dlysnichenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b789b7c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b789b7c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b789b7c1
Branch: refs/heads/branch-feature-AMBARI-12556
Commit: b789b7c102c164b3fab3275cebf33148a5755ee0
Parents: 309dbd7
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Wed Feb 22 13:52:17 2017 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Wed Feb 22 13:52:17 2017 +0200
----------------------------------------------------------------------
.../stacks/HDP/2.3/upgrades/config-upgrade.xml | 29 ++++++++++++++
.../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml | 30 +++++++++++++++
.../stacks/HDP/2.3/upgrades/upgrade-2.6.xml | 8 ++++
.../stacks/HDP/2.4/upgrades/config-upgrade.xml | 29 ++++++++++++++
.../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml | 32 ++++++++++++++++
.../stacks/HDP/2.4/upgrades/upgrade-2.6.xml | 11 ++++++
.../stacks/HDP/2.5/upgrades/config-upgrade.xml | 40 ++++++++++++++++++++
.../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml | 32 +++++++++++++++-
.../stacks/HDP/2.5/upgrades/upgrade-2.6.xml | 8 ++++
.../src/main/resources/stacks/stack_advisor.py | 2 +-
.../stacks/2.2/common/test_stack_advisor.py | 20 +++-------
.../stacks/2.5/common/test_stack_advisor.py | 5 +--
12 files changed, 226 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index cdd701a..8589e2d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -96,6 +96,10 @@
<type>tez-site</type>
<set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <type>tez-site</type>
+ <set key="tez.queue.name" value="default" if-type="tez-site" if-key="tez.queue.name" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
@@ -230,6 +234,12 @@
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxFileSize = {{webhcat_log_maxfilesize}}MB"/>
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxBackupIndex = {{webhcat_log_maxbackupindex}}"/>
</definition>
+
+ <definition xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <type>webhcat-site</type>
+ <set key="templeton.hadoop.queue.name" value="default" if-type="webhcat-site" if-key="templeton.hadoop.queue.name" if-key-state="absent"/>
+ </definition>
+
</changes>
</component>
</service>
@@ -423,6 +433,10 @@
<type>spark-javaopts-properties</type>
<transfer operation="delete" delete-key="content" />
</definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <type>spark-defaults</type>
+ <set key="spark.yarn.queue" value="default" if-type="spark-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
+ </definition>
</changes>
</component>
<component name="SPARK_THRIFTSERVER">
@@ -530,6 +544,21 @@
to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
default-value="false"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <type>yarn-env</type>
+ <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="MAPREDUCE2">
+ <component name="MAPREDUCE2_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <type>mapred-site</type>
+ <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index ff42022..d675986 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -326,12 +326,24 @@
</task>
</execute-stage>
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Resource Manager">
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <summary>Adding service check customization property</summary>
+ </task>
+ </execute-stage>
+
<execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
<summary>Verifying LZO codec path for mapreduce</summary>
</task>
</execute-stage>
+ <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <summary>Adding queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!-- HBASE -->
<!-- These HBASE configs changed in HDP 2.3.4.0, but Ambari can't distinguish HDP 2.3.2.0 vs HDP 2.3.4.0, so easier to always do them. -->
<execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBASE Master">
@@ -366,6 +378,12 @@
</task>
</execute-stage>
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez">
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!-- SQOOP -->
<execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
<!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
@@ -388,6 +406,12 @@
<task xsi:type="configure" id="hdp_2_5_0_0_webhcat_server_update_configuration_paths"/>
</execute-stage>
+ <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server">
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <summary>Adding queue customization setting</summary>
+ </task>
+ </execute-stage>
+
<execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Parameterizing WebHCat Log4J Properties">
<task xsi:type="configure" id="webhcat_log4j_parameterize">
<summary>Updating the Webhcat Log4J properties to include parameterizations</summary>
@@ -484,6 +508,12 @@
<task xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts"/>
</execute-stage>
+ <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!-- RANGER -->
<execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
<task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
index b255bc0..9917ee1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.6.xml
@@ -737,6 +737,7 @@
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
<summary>Verifying LZO codec path for mapreduce</summary>
</task>
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename"/>
</pre-upgrade>
<pre-downgrade copy-upgrade="true" />
@@ -765,6 +766,7 @@
<task xsi:type="configure" id="yarn_log4j_parameterize" />
<task xsi:type="configure" id="yarn_env_security_opts" />
<task xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name"/>
</pre-upgrade>
<pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
@@ -829,6 +831,10 @@
<service name="TEZ">
<component name="TEZ_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name"/>
+ </pre-upgrade>
+ <pre-downgrade/>
<upgrade>
<task xsi:type="restart-task" />
</upgrade>
@@ -908,6 +914,7 @@
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_5_0_0_webhcat_server_update_configuration_paths"/>
<task xsi:type="configure" id="webhcat_log4j_parameterize" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name"/>
</pre-upgrade>
<pre-downgrade />
@@ -967,6 +974,7 @@
<pre-upgrade>
<task xsi:type="configure" id="hdp_2_4_0_0_remove_spark_properties_extraJavaOptions"/>
<task xsi:type="configure" id="hdp_2_4_0_0_spark_java_opts"/>
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue"/>
</pre-upgrade>
<pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index 1bea263..14feab6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -31,6 +31,11 @@
<set key="tez.lib.uris" value="/hdp/apps/${hdp.version}/tez/tez.tar.gz"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <type>tez-site</type>
+ <set key="tez.queue.name" value="default" if-type="tez-site" if-key="tez.queue.name" if-key-state="absent"/>
+ </definition>
+
</changes>
</component>
</service>
@@ -121,6 +126,10 @@
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxFileSize = {{webhcat_log_maxfilesize}}MB"/>
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxBackupIndex = {{webhcat_log_maxbackupindex}}"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <type>webhcat-site</type>
+ <set key="templeton.hadoop.queue.name" value="default" if-type="webhcat-site" if-key="templeton.hadoop.queue.name" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
@@ -316,6 +325,10 @@
to-key="yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled"
default-value="false"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <type>yarn-env</type>
+ <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
+ </definition>
</changes>
</component>
@@ -330,6 +343,17 @@
</component>
</service>
+ <service name="MAPREDUCE2">
+ <component name="MAPREDUCE2_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <type>mapred-site</type>
+ <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
<service name="KAFKA">
<component name="KAFKA_BROKER">
<changes>
@@ -564,6 +588,11 @@
<type>spark-defaults</type>
<transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
</definition>
+
+ <definition xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <type>spark-defaults</type>
+ <set key="spark.yarn.queue" value="default" if-type="spark-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 86cd56a..e856288 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -301,6 +301,18 @@
</task>
</execute-stage>
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Resource Manager">
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <summary>Adding service check customization property</summary>
+ </task>
+ </execute-stage>
+
+ <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <summary>Adding queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!--TEZ-->
<execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez">
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -312,6 +324,13 @@
<task xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property"/>
</execute-stage>
+
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez">
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!--OOZIE-->
<execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
@@ -422,6 +441,13 @@
</task>
</execute-stage>
+ <!--SPARK-->
+ <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!-- SQOOP -->
<execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
<!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
@@ -473,6 +499,12 @@
</task>
</execute-stage>
+ <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server">
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <summary>Adding queue customization setting</summary>
+ </task>
+ </execute-stage>
+
<!-- HBASE -->
<execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Hbase Master">
<task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index 6a8e9d7..f736796 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -742,6 +742,7 @@
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
<summary>Verifying LZO codec path for mapreduce</summary>
</task>
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename"/>
</pre-upgrade>
<pre-downgrade copy-upgrade="true" />
@@ -770,6 +771,7 @@
<task xsi:type="configure" id="yarn_log4j_parameterize" />
<task xsi:type="configure" id="yarn_env_security_opts" />
<task xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name"/>
</pre-upgrade>
<pre-downgrade /> <!-- no-op to prevent config changes on downgrade -->
@@ -832,6 +834,10 @@
<service name="TEZ">
<component name="TEZ_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name"/>
+ </pre-upgrade>
+ <pre-downgrade/>
<upgrade>
<task xsi:type="restart-task" />
</upgrade>
@@ -906,6 +912,7 @@
<component name="WEBHCAT_SERVER">
<pre-upgrade>
<task xsi:type="configure" id="webhcat_log4j_parameterize" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name"/>
</pre-upgrade>
<pre-downgrade/>
<upgrade>
@@ -949,6 +956,10 @@
</upgrade>
</component>
<component name="SPARK_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue"/>
+ </pre-upgrade>
+ <pre-downgrade/>
<upgrade>
<task xsi:type="restart-task" />
</upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index ca9cf47..3069b34 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -84,7 +84,27 @@
</definition>
</changes>
</component>
+ <component name="SPARK_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <type>spark-defaults</type>
+ <set key="spark.yarn.queue" value="default" if-type="spark-defaults" if-key="spark.yarn.queue" if-key-state="absent"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
+ <service name="TEZ">
+ <component name="TEZ_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <type>tez-site</type>
+ <set key="tez.queue.name" value="default" if-type="tez-site" if-key="tez.queue.name" if-key-state="absent"/>
+ </definition>
+ </changes>
+ </component>
</service>
+
<service name="ZOOKEEPER">
<component name="ZOOKEEPER_SERVER">
<changes>
@@ -151,9 +171,25 @@
<set key="yarn.nodemanager.log-aggregation.num-log-files-per-app"
value="336" />
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <type>yarn-env</type>
+ <set key="service_check.queue.name" value="default" if-type="yarn-env" if-key="service_check.queue.name" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
+
+ <service name="MAPREDUCE2">
+ <component name="MAPREDUCE2_CLIENT">
+ <changes>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <type>mapred-site</type>
+ <set key="mapreduce.job.queuename" value="default" if-type="mapred-site" if-key="mapreduce.job.queuename" if-key-state="absent"/>
+ </definition>
+ </changes>
+ </component>
+ </service>
+
<service name="HDFS">
<component name="NAMENODE">
<changes>
@@ -409,6 +445,10 @@
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxFileSize = {{webhcat_log_maxfilesize}}MB"/>
<replace key="content" find="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.standard = org.apache.log4j.DailyRollingFileAppender
log4j.appender.standard.MaxBackupIndex = {{webhcat_log_maxbackupindex}}"/>
</definition>
+ <definition xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <type>webhcat-site</type>
+ <set key="templeton.hadoop.queue.name" value="default" if-type="webhcat-site" if-key="templeton.hadoop.queue.name" if-key-state="absent"/>
+ </definition>
</changes>
</component>
</service>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 06cae78..9516b6a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -281,6 +281,12 @@
</task>
</execute-stage>
+ <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename">
+ <summary>Adding queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!--Yarn-->
<execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
<task xsi:type="configure" id="yarn_log4j_parameterize">
@@ -302,6 +308,12 @@
</task>
</execute-stage>
+ <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Resource Manager">
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name">
+ <summary>Adding service check customization property</summary>
+ </task>
+ </execute-stage>
+
<!--Yarn-->
<execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for YARN app log retaintion">
<task xsi:type="configure" id="yarn_site_retained_log_count">
@@ -316,6 +328,12 @@
</task>
</execute-stage>
+ <execute-stage service="TEZ" component="TEZ_CLIENT" title="Update Tez queue configuration">
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
+
<!--OOZIE-->
<execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
@@ -466,6 +484,12 @@
</task>
</execute-stage>
+ <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server">
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name">
+ <summary>Adding queue customization setting</summary>
+ </task>
+ </execute-stage>
+
<execute-stage service="HIVE" component="HIVE_SERVER" title="Appending heap dump options for Hive">
<task xsi:type="configure" id="hdp_2_6_0_0_hive_append_heap_dump_options"/>
</execute-stage>
@@ -507,7 +531,13 @@
<!-- SPARK -->
<execute-stage service="SPARK" component="LIVY_SERVER" title="Apply config changes for Livy Server">
<task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs"/>
- </execute-stage>
+ </execute-stage>
+
+ <execute-stage service="SPARK" component="SPARK_CLIENT" title="Apply config changes for Spark">
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue">
+ <summary>Add queue customization property</summary>
+ </task>
+ </execute-stage>
</group>
<!--
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 64edbb8..04d603b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -650,6 +650,7 @@
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
<summary>Verifying LZO codec path for mapreduce</summary>
</task>
+ <task xsi:type="configure" id="hdp_2_6_0_0_mapreduce_job_queuename"/>
</pre-upgrade>
<pre-downgrade copy-upgrade="true" />
@@ -673,6 +674,7 @@
<task xsi:type="configure" id="yarn_env_security_opts" />
<task xsi:type="configure" id="hdp_2_6_0_0_yarn_priority_utilization_underutilized_preemption" />
<task xsi:type="configure" id="yarn_site_retained_log_count" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_service_check_queue_name"/>
</pre-upgrade>
<pre-downgrade />
<upgrade>
@@ -729,6 +731,7 @@
<task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
<summary>Verifying LZO codec path for Tez</summary>
</task>
+ <task xsi:type="configure" id="hdp_2_5_0_0_tez_queue_name"/>
</pre-upgrade>
<pre-downgrade copy-upgrade="true" />
@@ -811,6 +814,7 @@
<component name="WEBHCAT_SERVER">
<pre-upgrade>
<task xsi:type="configure" id="webhcat_log4j_parameterize" />
+ <task xsi:type="configure" id="hdp_2_6_0_0_templeton_hadoop_queue_name"/>
</pre-upgrade>
<pre-downgrade/>
<upgrade>
@@ -854,6 +858,10 @@
</upgrade>
</component>
<component name="SPARK_CLIENT">
+ <pre-upgrade>
+ <task xsi:type="configure" id="hdp_2_5_0_0_spark_yarn_queue"/>
+ </pre-upgrade>
+ <pre-downgrade/>
<upgrade>
<task xsi:type="restart-task" />
</upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index abcd762..04c6baf 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2526,7 +2526,7 @@ class DefaultStackAdvisor(StackAdvisor):
#region YARN and MAPREDUCE
def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):
if propertyName not in properties:
- return self.getErrorItem("Value should be set")
+ return None
capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index b8762b3..36936d5 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3488,12 +3488,7 @@ class TestHDP22StackAdvisor(TestCase):
'tez.tez-ui.history-url.base' : 'http://host:8080/#/main/views/TEZ/0.7.0.2.3.0.0-2155/TEZ_CLUSTER_INSTANCE'}
- res_expected = [{'config-name': 'tez.queue.name',
- 'config-type': 'tez-site',
- 'level': 'ERROR',
- 'message': 'Value should be set',
- 'type': 'configuration'},
- {'config-name': 'tez.tez-ui.history-url.base',
+ res_expected = [{'config-name': 'tez.tez-ui.history-url.base',
'config-type': 'tez-site',
'level': 'WARN',
'message': "It is recommended to set value https://host:8443/#/main/views/TEZ/0.7.0.2.3.0.0-2155/TEZ_CLUSTER_INSTANCE for property tez.tez-ui.history-url.base",
@@ -3510,7 +3505,7 @@ class TestHDP22StackAdvisor(TestCase):
'type': 'configuration'}]
res = self.stackAdvisor.validateTezConfigurations(properties, recommendedDefaults, configurations, '', '')
- self.assertEquals(res, res_expected)
+ self.assertEquals(res_expected, res)
def test_validateHDFSConfigurationsEnv(self):
@@ -3622,11 +3617,6 @@ class TestHDP22StackAdvisor(TestCase):
'type': 'configuration',
'config-name': 'yarn.app.mapreduce.am.command-opts',
'level': 'WARN'},
- {'config-name': 'mapreduce.job.queuename',
- 'config-type': 'mapred-site',
- 'level': 'ERROR',
- 'message': 'Value should be set',
- 'type': 'configuration'},
{'config-type': 'mapred-site',
'message': 'yarn.app.mapreduce.am.command-opts Xmx should be less than yarn.app.mapreduce.am.resource.mb (410)',
'type': 'configuration',
@@ -3634,7 +3624,7 @@ class TestHDP22StackAdvisor(TestCase):
'level': 'WARN'}]
res = self.stackAdvisor.validateMapReduce2Configurations(properties, recommendedDefaults, {}, '', '')
- self.assertEquals(res, res_expected)
+ self.assertEquals(res_expected, res)
def test_validateHiveConfigurationsEnv(self):
properties = {"hive_security_authorization": "None"}
@@ -4320,7 +4310,7 @@ class TestHDP22StackAdvisor(TestCase):
}
# Test with ranger plugin enabled, validation fails
- res_expected = [{'config-type': 'spark-defaults', 'message': 'Value should be set', 'type': 'configuration', 'config-name': 'spark.yarn.queue', 'level': 'ERROR'}]
+ res_expected = []
res = self.stackAdvisor.validateSparkDefaults(properties, recommendedDefaults, configurations, services, {})
- self.assertEquals(res, res_expected)
+ self.assertEquals(res_expected, res)
http://git-wip-us.apache.org/repos/asf/ambari/blob/b789b7c1/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 176dd99..6890ef6 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -5388,11 +5388,10 @@ class TestHDP25StackAdvisor(TestCase):
]
}
- # Test with ranger plugin enabled, validation fails
- res_expected = [{'config-type': 'spark2-defaults', 'message': 'Value should be set', 'type': 'configuration', 'config-name': 'spark.yarn.queue', 'level': 'ERROR'}]
+ res_expected = []
res = self.stackAdvisor.validateSpark2Defaults(properties, recommendedDefaults, configurations, services, {})
- self.assertEquals(res, res_expected)
+ self.assertEquals(res_expected, res)
def test_recommendOozieConfigurations_noFalconServer(self):