Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/01 15:26:34 UTC

[51/94] ambari git commit: AMBARI-16272. Ambari Upgrade shouldn't automatically add stack configs (dlysnichenko)
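
This patch applies one mechanical change across the stack configuration files diffed below: every <property> element gains explicit <on-ambari-upgrade> and <on-stack-upgrade> markers, so upgrade-time handling is declared per property rather than stack configs being added automatically. A minimal sketch of the marker pattern, taken from the hunks repeated throughout this diff (the add/change/delete attribute combination shown is the one used in every hunk here; element and property names come from the diff itself):

  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
    <!-- declares how the property is treated during an Ambari upgrade vs. a stack upgrade -->
    <on-ambari-upgrade add="true" change="false" delete="false"/>
    <on-stack-upgrade add="true" change="false" delete="false"/>
  </property>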

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
index ab46feb..6027671 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
@@ -1,7 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
+<!--
     Licensed to the Apache Software Foundation (ASF) under one or more
     contributor license agreements.  See the NOTICE file distributed with
     this work for additional information regarding copyright ownership.
@@ -17,13 +16,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
  -->
- 
 <!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- i/o properties -->
   <property>
     <name>io.file.buffer.size</name>
     <value>131072</value>
@@ -31,49 +26,54 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-<!-- file system properties -->
-
+  <!-- file system properties -->
   <property>
     <name>fs.defaultFS</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>fs.trash.interval</name>
     <value>360</value>
     <description>Number of minutes between trash checkpoints.
   If zero, the trash feature is disabled.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem the DFS secondary
         name node should store the temporary images to merge.
         If this is a comma-delimited list of directories then the image is
         replicated in all of the directories for redundancy.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.checkpoint.edits.dir</name>
     <value>${dfs.namenode.checkpoint.dir}</value>
@@ -83,15 +83,17 @@
         replicated in all of the directoires for redundancy.
         Default value is same as dfs.namenode.checkpoint.dir
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.checkpoint.period</name>
     <value>21600</value>
     <description>The number of seconds between two periodic checkpoints.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
     <name>ipc.client.idlethreshold</name>
@@ -99,22 +101,25 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>ipc.client.connection.maxidletime</name>
     <value>30000</value>
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <!-- Web Interface Configuration -->
   <property>
     <name>mapreduce.jobtracker.webinterface.trusted</name>
@@ -124,24 +129,28 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
      Enable authorization for different protocols.
   </description>
-</property>
-
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
     <value>
@@ -152,7 +161,7 @@
         RULE:[2:$1@$0](rs@.*)s/.*/hbase/
         DEFAULT
     </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+    <description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
   So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
   "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
 The translations rules have 3 sections:
@@ -190,6 +199,7 @@ If you want to treat all principals from APACHE.ORG with /admin as "admin", your
 RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
 DEFAULT
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
index f636751..dcb151c 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
@@ -19,179 +19,243 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>namenode_host</name>
-    <value></value>
+    <value/>
     <description>NameNode Host.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_namenode_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>snamenode_host</name>
-    <value></value>
+    <value/>
     <description>Secondary NameNode.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>datanode_hosts</name>
-    <value></value>
+    <value/>
     <description>List of Datanode Hosts.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Data directories for Data Nodes.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hadoop_pid_dir_prefix</name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_webhdfs_enabled</name>
     <value>true</value>
     <description>WebHDFS enabled</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>NameNode Java heap size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_newsize</name>
     <value>200</value>
     <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_maxnewsize</name>
     <value>640</value>
     <description>NameNode maximum new generation size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_permsize</name>
     <value>128</value>
     <description>NameNode permanent generation size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_maxpermsize</name>
     <value>256</value>
     <description>NameNode maximum permanent generation size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>
     <description>Reserved space for HDFS</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_failed_volume_tolerated</name>
     <value>0</value>
     <description>DataNode volumes failure toleration</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_period</name>
     <value>21600</value>
     <description>HDFS Maximum Checkpoint Delay</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>fs_checkpoint_size</name>
     <value>0.5</value>
     <description>FS Checkpoint Size.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
     <description>Proxy user group.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_exclude</name>
-    <value></value>
+    <value/>
     <description>HDFS Exclude hosts.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_replication</name>
     <value>3</value>
     <description>Default Block Replication.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_block_local_path_access_user</name>
     <value>hbase</value>
     <description>Default Block Replication.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_address</name>
     <value>50010</value>
     <description>Port for datanode address.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_http_address</name>
     <value>50075</value>
     <description>Port for datanode address.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir_perm</name>
     <value>750</value>
     <description>Datanode dir perms.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security_enabled</name>
     <value>false</value>
     <description>Hadoop Security</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>kerberos_domain</name>
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>kadmin_pw</name>
-    <value></value>
+    <value/>
     <description>Kerberos realm admin password</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>Kerberos keytab path.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-  
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>KeyTab Directory.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-    <property>
+  <property>
     <name>namenode_formatted_mark_dir</name>
     <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
     <description>Formatteed Mark Directory.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-    <property>
+  <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <description>User and Groups.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-  
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index 51b01bb..d6e493b 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,9 +16,7 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration>
   <property>
     <name>security.client.protocol.acl</name>
@@ -29,8 +26,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.client.datanode.protocol.acl</name>
     <value>*</value>
@@ -39,8 +37,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.datanode.protocol.acl</name>
     <value>*</value>
@@ -49,8 +48,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.inter.datanode.protocol.acl</name>
     <value>*</value>
@@ -59,8 +59,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.namenode.protocol.acl</name>
     <value>*</value>
@@ -69,8 +70,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.inter.tracker.protocol.acl</name>
     <value>*</value>
@@ -79,8 +81,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.job.client.protocol.acl</name>
     <value>*</value>
@@ -89,8 +92,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.job.task.protocol.acl</name>
     <value>*</value>
@@ -99,17 +103,19 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
- <property>
+  <property>
     <name>security.admin.operations.protocol.acl</name>
     <value>hadoop</value>
     <description>ACL for AdminOperationsProtocol. Used for admin commands.
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
     <value>hadoop</value>
@@ -118,9 +124,10 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-<property>
+  <property>
     <name>security.refresh.policy.protocol.acl</name>
     <value>hadoop</value>
     <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
@@ -128,7 +135,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 64f1c2c..5236ab9 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,63 +16,67 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration>
-
-<!-- file system properties -->
+  <!-- file system properties -->
   <property>
     <name>test.password</name>
     <property-type>PASSWORD</property-type>
     <value>test</value>
     <description>1</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>test.password.empty</name>
     <property-type>PASSWORD</property-type>
-    <value></value>
+    <value/>
     <description>1</description>
     <value-attributes>
       <type>password</type>
     </value-attributes>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.name.dir</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table.  If this is a comma-delimited list
       of directories then the name table is replicated in all of the
       directories, for redundancy. </description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
     <description>to enable webhdfs</description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
     <description>#of failed disks dn would tolerate</description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.data.dir</name>
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem an DFS data node
   should store its blocks.  If this is a comma-delimited
   list of directories, then data will be stored in all named
@@ -81,18 +84,20 @@
   Directories that do not exist are ignored.
   </description>
     <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value/>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
     excluded.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-<!--
+  <!--
   <property>
     <name>dfs.hosts</name>
     <value></value>
@@ -102,33 +107,36 @@
     permitted.</description>
   </property>
 -->
-
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
@@ -138,8 +146,9 @@
         Values less than or equal to 0 mean not to start in safe mode.
         Values greater than 1 will make safe mode permanent.
         </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
     <value>6250000</value>
@@ -148,218 +157,241 @@
         can utilize for the balancing purpose in term of
         the number of bytes per second.
   </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.address</name>
     <value>0.0.0.0:50010</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.http.address</name>
     <value>0.0.0.0:50075</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.blocksize</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.http-address</name>
-    <value></value>
-<description>The name of the default file system.  Either the
+    <value/>
+    <description>The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    <final>true</final>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
 </description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 </description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.transfer.threads</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>fs.permissions.umask-mode</name>
-<value>022</value>
-<description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <!-- Permissions configuration -->
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
 The octal umask used when creating files and directories.
 </description>
-</property>
-
-<property>
-<name>dfs.permissions.enabled</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
 If "true", enable permission checking in HDFS.
 If "false", permission checking is turned off,
 but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 </description>
-</property>
-
-<property>
-<name>dfs.permissions.superusergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 </description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value/>
+    <description>
 Kerberos principal name for the NameNode
 </description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value/>
     <description>
         Kerberos principal name for the secondary NameNode.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-
-<!--
+  <!--
   This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
 -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
+    <value/>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value/>
     <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
-
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
+    <value/>
     <description>Address of secondary namenode web server</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>
       The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPENGO specification.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
+    <value/>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The filename of the keytab file for the DataNode.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.https-address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
+    <value/>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.datanode.data.dir
+    <description>The permissions that should be there on dfs.datanode.data.dir
 directories. The datanode will not come up if the permissions are
 different on existing dfs.datanode.data.dir directories. If the directories
 don't exist, they will be created with this permission.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>0</value>
@@ -367,14 +399,16 @@ don't exist, they will be created with this permission.</description>
                  The default value is 1 hour. Setting a value of 0 disables
                  access times for HDFS.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL for who all can view the default servlets in the HDFS</description>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
     <value>true</value>
@@ -383,6 +417,8 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.write.stale.datanode</name>
@@ -392,6 +428,8 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.write.stale.datanode.ratio</name>
@@ -399,47 +437,54 @@ don't exist, they will be created with this permission.</description>
     <description>When the ratio of number stale datanodes to total datanodes marked is greater
       than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dfs.namenode.stale.datanode.interval</name>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-  
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
      If the port is 0 then the server will start on a free port. </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-  
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <!-- HDFS Short-Circuit Local Reads -->
-
   <property>
     <name>dfs.client.read.shortcircuit</name>
     <value>true</value>
     <description>
       This configuration parameter turns on short-circuit local reads.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
+    <value/>
     <description>Enable/disbale skipping the checksum check</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>dfs.client.read.shortcircuit.streams.cache.size</name>
     <value>4096</value>
@@ -449,6 +494,7 @@ don't exist, they will be created with this permission.</description>
       more file descriptors, but potentially provide better performance on
       workloads involving lots of seeks.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
index 830b0af..2260c5c 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
@@ -16,178 +16,205 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-
 <configuration>
   <property>
     <name>hive.metastore.local</name>
     <value>false</value>
     <description>controls whether to connect to remove metastore server or
     open a new metastore server in Hive Client JVM</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
+    <value/>
     <description>JDBC connect string for a JDBC metastore</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
+    <value/>
     <description>username to use against metastore database</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
+    <value/>
     <description>password to use against metastore database</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value/>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value/>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value/>
     <description>URI for client to contact metastore server</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hadoop.clientside.fs.operations</name>
     <value>true</value>
     <description>FS operations are owned by client</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the hive client authorization</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enable the optimization about converting common
       join into mapjoin based on the input file size.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
     <value>true</value>
     <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
       the criteria for sort-merge join.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
     <value>true</value>
@@ -195,8 +222,9 @@ limitations under the License.
       size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask.size</name>
     <value>1000000000</value>
@@ -204,8 +232,9 @@ limitations under the License.
       is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
       converted to a mapjoin(there is no conditional task). The default is 10MB.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
     <value>1</value>
@@ -213,8 +242,9 @@ limitations under the License.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.mapjoin.mapreduce</name>
     <value>true</value>
@@ -223,8 +253,9 @@ limitations under the License.
       job (for e.g a group by), each map-only job is merged with the following
       map-reduce job.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapjoin.bucket.cache.size</name>
     <value>10000</value>
@@ -232,24 +263,28 @@ limitations under the License.
       Size per reducer.The default is 1G, i.e if the input size is 10G, it
       will use 10 reducers.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.index.filter</name>
     <value>true</value>
     <description>
     Whether to enable automatic use of indexes
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>
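
As the hunks above show, every stack configuration property in this file now carries explicit upgrade metadata alongside its value and description. A minimal sketch of the resulting per-property layout (the property name and value below are illustrative only, not taken from the patch):

  <property>
    <name>example.property.name</name>
    <value>example-value</value>
    <description>Illustrative property showing the new per-property upgrade metadata.</description>
    <on-ambari-upgrade add="true" change="false" delete="false"/>
    <on-stack-upgrade add="true" change="false" delete="false"/>
  </property>

The three boolean attributes appear to control whether a property is added, changed, or deleted when the corresponding upgrade type runs; in this hunk every property is marked add="true" with change and delete left false.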

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
index ceedd56..a148c52 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
@@ -19,26 +19,33 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>hs_host</name>
-    <value></value>
+    <value/>
     <description>History Server.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>mapred_user</name>
     <value>mapred</value>
     <description>Mapreduce User</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
index ce12380..2b6307e 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,23 +16,20 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- mapred-queue-acls.xml -->
 <configuration>
-
-
-<!-- queue default -->
-
+  <!-- queue default -->
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <!-- END ACLs -->
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
index fad359e..9646961 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,13 +16,9 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
+  <!-- i/o properties -->
   <property>
     <name>mapreduce.task.io.sort.mb</name>
     <value>100</value>
@@ -31,8 +26,9 @@
       The total amount of buffer memory to use while sorting files, in megabytes.
       By default, gives each merge stream 1MB, which should minimize seeks.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
     <value>0.1</value>
@@ -43,8 +39,9 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.task.io.sort.factor</name>
     <value>100</value>
@@ -52,9 +49,10 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
   <property>
     <name>mapreduce.reduce.shuffle.parallelcopies</name>
     <value>30</value>
@@ -62,8 +60,9 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.speculative</name>
     <value>false</value>
@@ -71,8 +70,9 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.speculative</name>
     <value>false</value>
@@ -80,8 +80,9 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.job.reduce.slowstart.completedmaps</name>
     <value>0.05</value>
@@ -89,8 +90,9 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.shuffle.merge.percent</name>
     <value>0.66</value>
@@ -100,8 +102,9 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
     <value>0.7</value>
@@ -109,16 +112,18 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
+    <value/>
     <description>If the map outputs are compressed, how should they be
       compressed
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
     <value>BLOCK</value>
@@ -126,8 +131,9 @@
       If the job outputs are to compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.input.buffer.percent</name>
     <value>0.0</value>
@@ -137,14 +143,16 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <!-- copied from kryptonite configuration -->
   <property>
     <name>mapreduce.map.output.compress</name>
-    <value></value>
+    <value/>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.task.timeout</name>
     <value>600000</value>
@@ -153,31 +161,36 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>1536</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
+    <value/>
     <description>The filename of the keytab for the task tracker</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>The keytab for the job history server principal.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.shuffle.port</name>
     <value>13562</value>
@@ -186,36 +199,37 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
     <value>/mr-history/tmp</value>
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
     <value>/mr-history/done</value>
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
-  <property>
+  <property>
     <name>mapreduce.jobhistory.address</name>
     <value>localhost:10020</value>
     <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
+  <on-ambari-upgrade add="true" change="false" delete="false"/><on-stack-upgrade add="true" change="false" delete="false"/></property>
+  <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>localhost:19888</value>
     <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
+  <on-ambari-upgrade add="true" change="false" delete="false"/><on-stack-upgrade add="true" change="false" delete="false"/></property>
   <property>
     <name>mapreduce.framework.name</name>
     <value>yarn</value>
@@ -223,22 +237,25 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
     <value>/user</value>
     <description>
       The staging dir used while submitting jobs.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.resource.mb</name>
     <value>1024</value>
     <description>The amount of memory the MR AppMaster needs.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.command-opts</name>
     <value>-Xmx756m</value>
@@ -255,8 +272,9 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
@@ -270,35 +288,40 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
+    <value/>
     <description>
       User added environment variables for the MR App Master
       processes. Example :
       1) A=foo  This will set the env variable A to foo
       2) B=$B:c This is inherit tasktracker's B env variable.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.application.classpath</name>
     <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
@@ -306,8 +329,9 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.am.max-attempts</name>
     <value>2</value>
@@ -317,40 +341,45 @@
       set by resourcemanager. Otherwise, it will be override. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>512</value>
     <description>
       Larger resource limit for maps.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.java.opts</name>
     <value>-Xmx320m</value>
     <description>
       Larger heap-size for child jvms of maps.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
     <description>
       Larger resource limit for reduces.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.java.opts</name>
     <value>-Xmx756m</value>
     <description>
       Larger heap-size for child jvms of reduces.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.map.log.level</name>
     <value>INFO</value>
@@ -358,8 +387,9 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.reduce.log.level</name>
     <value>INFO</value>
@@ -367,8 +397,9 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>mapreduce.admin.user.env</name>
     <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
@@ -377,6 +408,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>
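
All of the properties touched in this section are tagged add="true", so their upgrade behavior is unchanged; the value of making the flag explicit is that individual definitions can now opt out. A hypothetical sketch (not part of this patch) of a property that an Ambari upgrade should presumably leave untouched might look like:

  <property>
    <name>example.site.specific.override</name>
    <value>custom-value</value>
    <description>Hypothetical property that should not be re-added automatically during an Ambari upgrade.</description>
    <on-ambari-upgrade add="false" change="false" delete="false"/>
    <on-stack-upgrade add="true" change="false" delete="false"/>
  </property>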