Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/02 15:02:34 UTC

[04/47] ambari git commit: AMBARI-16272. Ambari Upgrade shouldn't automatically add stack configs (dlysnichenko)
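
The hunks below add two elements to every property entry in these stack
configuration files. Judging purely from the element and attribute names used
in this patch, they appear to declare whether a property may be added, changed,
or deleted during an Ambari upgrade versus a stack upgrade; that reading is an
inference from the diff, not something stated in this message. A minimal sketch
of the shape repeated throughout the patch (the property name and value here
are placeholders, not part of the change):

  <property>
    <name>example.property</name>
    <value>example</value>
    <!-- new in this patch: per-upgrade-type behavior flags -->
    <on-ambari-upgrade add="false" change="true" delete="true"/>
    <on-stack-upgrade add="true" change="true" delete="false"/>
  </property>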

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 64f1c2c..3e6d7e5 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,63 +16,67 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration>
-
-<!-- file system properties -->
+  <!-- file system properties -->
   <property>
     <name>test.password</name>
     <property-type>PASSWORD</property-type>
     <value>test</value>
     <description>1</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>test.password.empty</name>
     <property-type>PASSWORD</property-type>
-    <value></value>
+    <value/>
     <description>1</description>
     <value-attributes>
       <type>password</type>
     </value-attributes>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.name.dir</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table.  If this is a comma-delimited list
       of directories then the name table is replicated in all of the
       directories, for redundancy. </description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
     <description>to enable webhdfs</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
     <description>#of failed disks dn would tolerate</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.data.dir</name>
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem an DFS data node
   should store its blocks.  If this is a comma-delimited
   list of directories, then data will be stored in all named
@@ -81,18 +84,20 @@
   Directories that do not exist are ignored.
   </description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value/>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
     excluded.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<!--
+  <!--
   <property>
     <name>dfs.hosts</name>
     <value></value>
@@ -102,33 +107,36 @@
     permitted.</description>
   </property>
 -->
-
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
@@ -138,8 +146,9 @@
         Values less than or equal to 0 mean not to start in safe mode.
         Values greater than 1 will make safe mode permanent.
         </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
     <value>6250000</value>
@@ -148,218 +157,241 @@
         can utilize for the balancing purpose in term of
         the number of bytes per second.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.address</name>
     <value>0.0.0.0:50010</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.http.address</name>
     <value>0.0.0.0:50075</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.blocksize</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.http-address</name>
-    <value></value>
-<description>The name of the default file system.  Either the
+    <value/>
+    <description>The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
 </description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 </description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.transfer.threads</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>fs.permissions.umask-mode</name>
-<value>022</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <!-- Permissions configuration -->
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
 The octal umask used when creating files and directories.
 </description>
-</property>
-
-<property>
-<name>dfs.permissions.enabled</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
 If "true", enable permission checking in HDFS.
 If "false", permission checking is turned off,
 but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 </description>
-</property>
-
-<property>
-<name>dfs.permissions.superusergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 </description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value/>
+    <description>
 Kerberos principal name for the NameNode
 </description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value/>
     <description>
         Kerberos principal name for the secondary NameNode.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
-<!--
+  <!--
   This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
 -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
+    <value/>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value/>
     <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
+    <value/>
     <description>Address of secondary namenode web server</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>
       The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPENGO specification.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
+    <value/>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The filename of the keytab file for the DataNode.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.https-address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
+    <value/>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.datanode.data.dir
+    <description>The permissions that should be there on dfs.datanode.data.dir
 directories. The datanode will not come up if the permissions are
 different on existing dfs.datanode.data.dir directories. If the directories
 don't exist, they will be created with this permission.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>0</value>
@@ -367,14 +399,16 @@ don't exist, they will be created with this permission.</description>
                  The default value is 1 hour. Setting a value of 0 disables
                  access times for HDFS.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL for who all can view the default servlets in the HDFS</description>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
     <value>true</value>
@@ -383,6 +417,8 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.write.stale.datanode</name>
@@ -392,6 +428,8 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.write.stale.datanode.ratio</name>
@@ -399,47 +437,54 @@ don't exist, they will be created with this permission.</description>
     <description>When the ratio of number stale datanodes to total datanodes marked is greater
       than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.stale.datanode.interval</name>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-  
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
      If the port is 0 then the server will start on a free port. </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-  
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <!-- HDFS Short-Circuit Local Reads -->
-
   <property>
     <name>dfs.client.read.shortcircuit</name>
     <value>true</value>
     <description>
       This configuration parameter turns on short-circuit local reads.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
+    <value/>
     <description>Enable/disbale skipping the checksum check</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.client.read.shortcircuit.streams.cache.size</name>
     <value>4096</value>
@@ -449,6 +494,7 @@ don't exist, they will be created with this permission.</description>
       more file descriptors, but potentially provide better performance on
       workloads involving lots of seeks.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
index 830b0af..023ea8e 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
@@ -16,178 +16,205 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-
 <configuration>
   <property>
     <name>hive.metastore.local</name>
     <value>false</value>
     <description>controls whether to connect to remove metastore server or
     open a new metastore server in Hive Client JVM</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
+    <value/>
     <description>JDBC connect string for a JDBC metastore</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
+    <value/>
     <description>username to use against metastore database</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
+    <value/>
     <description>password to use against metastore database</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value/>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value/>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value/>
     <description>URI for client to contact metastore server</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hadoop.clientside.fs.operations</name>
     <value>true</value>
     <description>FS operations are owned by client</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the hive client authorization</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enable the optimization about converting common
       join into mapjoin based on the input file size.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
     <value>true</value>
     <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
       the criteria for sort-merge join.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
     <value>true</value>
@@ -195,8 +222,9 @@ limitations under the License.
       size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask.size</name>
     <value>1000000000</value>
@@ -204,8 +232,9 @@ limitations under the License.
       is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
       converted to a mapjoin(there is no conditional task). The default is 10MB.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
     <value>1</value>
@@ -213,8 +242,9 @@ limitations under the License.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.mapjoin.mapreduce</name>
     <value>true</value>
@@ -223,8 +253,9 @@ limitations under the License.
       job (for e.g a group by), each map-only job is merged with the following
       map-reduce job.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapjoin.bucket.cache.size</name>
     <value>10000</value>
@@ -232,24 +263,28 @@ limitations under the License.
       Size per reducer.The default is 1G, i.e if the input size is 10G, it
       will use 10 reducers.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.index.filter</name>
     <value>true</value>
     <description>
     Whether to enable automatic use of indexes
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
index ceedd56..a497471 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
@@ -19,26 +19,33 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>hs_host</name>
-    <value></value>
+    <value/>
     <description>History Server.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_user</name>
     <value>mapred</value>
     <description>Mapreduce User</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
index ce12380..3f83f98 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,23 +16,20 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- mapred-queue-acls.xml -->
 <configuration>
-
-
-<!-- queue default -->
-
+  <!-- queue default -->
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <!-- END ACLs -->
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
index fad359e..f65cfc7 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,13 +16,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
+  <!-- i/o properties -->
   <property>
     <name>mapreduce.task.io.sort.mb</name>
     <value>100</value>
@@ -31,8 +26,9 @@
       The total amount of buffer memory to use while sorting files, in megabytes.
       By default, gives each merge stream 1MB, which should minimize seeks.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
     <value>0.1</value>
@@ -43,8 +39,9 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.task.io.sort.factor</name>
     <value>100</value>
@@ -52,9 +49,10 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
   <property>
     <name>mapreduce.reduce.shuffle.parallelcopies</name>
     <value>30</value>
@@ -62,8 +60,9 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.speculative</name>
     <value>false</value>
@@ -71,8 +70,9 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.speculative</name>
     <value>false</value>
@@ -80,8 +80,9 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.job.reduce.slowstart.completedmaps</name>
     <value>0.05</value>
@@ -89,8 +90,9 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.shuffle.merge.percent</name>
     <value>0.66</value>
@@ -100,8 +102,9 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
     <value>0.7</value>
@@ -109,16 +112,18 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
+    <value/>
     <description>If the map outputs are compressed, how should they be
       compressed
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
     <value>BLOCK</value>
@@ -126,8 +131,9 @@
       If the job outputs are to compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.input.buffer.percent</name>
     <value>0.0</value>
@@ -137,14 +143,16 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <!-- copied from kryptonite configuration -->
   <property>
     <name>mapreduce.map.output.compress</name>
-    <value></value>
+    <value/>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.task.timeout</name>
     <value>600000</value>
@@ -153,31 +161,36 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>1536</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
+    <value/>
     <description>The filename of the keytab for the task tracker</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>The keytab for the job history server principal.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.shuffle.port</name>
     <value>13562</value>
@@ -186,36 +199,41 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
     <value>/mr-history/tmp</value>
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
     <value>/mr-history/done</value>
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-  <property>   ����
+  <property>
     <name>mapreduce.jobhistory.address</name>
     <value>localhost:10020</value>
     <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-  <property>   ����
+  <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>localhost:19888</value>
     <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.framework.name</name>
     <value>yarn</value>
@@ -223,22 +241,25 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
     <value>/user</value>
     <description>
       The staging dir used while submitting jobs.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.resource.mb</name>
     <value>1024</value>
     <description>The amount of memory the MR AppMaster needs.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.command-opts</name>
     <value>-Xmx756m</value>
@@ -255,8 +276,9 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
@@ -270,35 +292,40 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
+    <value/>
     <description>
       User added environment variables for the MR App Master
       processes. Example :
       1) A=foo  This will set the env variable A to foo
       2) B=$B:c This is inherit tasktracker's B env variable.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.application.classpath</name>
     <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
@@ -306,8 +333,9 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.am.max-attempts</name>
     <value>2</value>
@@ -317,40 +345,45 @@
       set by resourcemanager. Otherwise, it will be override. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>512</value>
     <description>
       Larger resource limit for maps.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.java.opts</name>
     <value>-Xmx320m</value>
     <description>
       Larger heap-size for child jvms of maps.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
     <description>
       Larger resource limit for reduces.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.java.opts</name>
     <value>-Xmx756m</value>
     <description>
       Larger heap-size for child jvms of reduces.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.log.level</name>
     <value>INFO</value>
@@ -358,8 +391,9 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.log.level</name>
     <value>INFO</value>
@@ -367,8 +401,9 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.user.env</name>
     <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
@@ -377,6 +412,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 32864db..6e69879 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -15,11 +15,9 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
-
+-->
 <configuration>
-
-<!--
+  <!--
     Refer to the oozie-default.xml file for the complete list of
     Oozie configuration properties and their default values.
 -->
@@ -27,145 +25,164 @@
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
     The Oozie system ID.
     </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
      System mode for  Oozie at startup.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
      Specifies whether security (user name/admin role) is enabled or not.
      If disabled any user can manage Oozie system and manage any job.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
      Jobs older than this value, in days, will be purged by the PurgeService.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
      Interval at which the purge service will run, in seconds.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
      Maximum concurrency for a given callable type.
     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
      All commands that use action executors (action-start, action-end, action-kill and action-check) use
      the action type as the callable type.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
       -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
      </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value> </value>
+    <description>
       Whitelisted job tracker for Oozie service.
       </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
       </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value> </value>
+    <description>
       </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
      This path is added to a workflow application if its job properties set
       the property 'oozie.use.system.libpath' to true.
       </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
      If set to true, submissions of MapReduce and Pig jobs will automatically
      include the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
       </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
 
 
 
 
 
         </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
           Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
           the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
           used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
@@ -173,73 +190,85 @@
           the Oozie configuration directory; though the path can be absolute (i.e. to point
          to Hadoop client conf/ directories in the local filesystem).
       </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
             org.apache.oozie.action.email.EmailActionExecutor,
             org.apache.oozie.action.hadoop.HiveActionExecutor,
             org.apache.oozie.action.hadoop.ShellActionExecutor,
             org.apache.oozie.action.hadoop.SqoopActionExecutor,
             org.apache.oozie.action.hadoop.DistcpActionExecutor
         </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
             Creates Oozie DB.
 
            If set to true, it creates the DB schema if it does not exist. If the DB schema exists, it is a NOP.
             If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
             JDBC driver class.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
             JDBC URL.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>sa</value>
+    <description>
             DB user name.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value> </value>
+    <description>
             DB user password.
 
            IMPORTANT: if password is empty leave a 1 space string, the service trims the value,
                        if empty Configuration assumes it is NULL.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
              Max number of connections.
         </description>
-    </property>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
index fd9101d..061bfeb 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
@@ -1,3 +1,4 @@
+<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -14,17 +15,16 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <configuration>
-
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>
     <value>10000</value>
     <description>
       Maximum number of applications that can be pending and running.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
     <value>0.2</value>
@@ -33,16 +33,18 @@
       application masters i.e. controls number of concurrent running
       applications.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
     <value>default</value>
     <description>
      The queues at this level (root is the root queue).
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.capacity</name>
     <value>100</value>
@@ -52,54 +54,61 @@
       The child queues capacity should add up to their parent queue's capacity
       or less.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
     <value>1</value>
     <description>
      Default queue user limit, as a percentage from 0.0 to 1.0.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
     <value>100</value>
     <description>
       The maximum capacity of the default queue. 
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.state</name>
     <value>RUNNING</value>
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
     <value>*</value>
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
     <value>*</value>
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
     <value>*</value>
@@ -107,6 +116,7 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
index edd1636..3494d50 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
@@ -19,46 +19,61 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>rm_host</name>
-    <value></value>
+    <value/>
     <description>ResourceManager.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>nm_hosts</name>
-    <value></value>
+    <value/>
     <description>List of NodeManager Hosts.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>yarn_user</name>
     <value>yarn</value>
     <description>YARN User</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
 </configuration>