Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/09 15:02:03 UTC

[38/70] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation. Change defaults (dlysnichenko)

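[Editor's note] Every hunk in this patch makes the same mechanical change: the three-attribute form <on-ambari-upgrade add="true" update="false" delete="false"/> is collapsed to the single-attribute default <on-ambari-upgrade add="true"/>. A bulk rewrite of this shape could be scripted roughly as sketched below. This is an illustrative sketch only, not the tooling actually used for AMBARI-17112; the STACKS_ROOT path and the regex are assumptions, and the real commit also covers other stack resource trees and the validation/defaults changes mentioned in the subject.

    #!/usr/bin/env python
    # Sketch (hypothetical, not the commit's actual tooling): collapse
    # <on-ambari-upgrade add=".." update=".." delete=".."/> elements in
    # stack configuration XMLs down to the add attribute only.
    import os
    import re

    # Assumed root; the hunks below touch files under
    # ambari-server/src/test/resources/stacks/...
    STACKS_ROOT = "ambari-server/src/test/resources/stacks"

    PATTERN = re.compile(
        r'<on-ambari-upgrade\s+add="(true|false)"'
        r'\s+update="(?:true|false)"\s+delete="(?:true|false)"\s*/>'
    )

    def simplify(path):
        # Rewrite one XML file in place; return True if it changed.
        with open(path) as f:
            text = f.read()
        new_text = PATTERN.sub(r'<on-ambari-upgrade add="\1"/>', text)
        if new_text != text:
            with open(path, "w") as f:
                f.write(new_text)
            return True
        return False

    if __name__ == "__main__":
        changed = 0
        for dirpath, _, filenames in os.walk(STACKS_ROOT):
            for name in filenames:
                if name.endswith(".xml"):
                    if simplify(os.path.join(dirpath, name)):
                        changed += 1
        print("rewrote %d configuration files" % changed)

The diff hunks that follow are the result of exactly this kind of one-attribute simplification applied file by file.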
http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
index 52d277e..b396f8e 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
@@ -26,19 +26,19 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- file system properties -->
   <property>
@@ -48,7 +48,7 @@
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.trash.interval</name>
@@ -56,7 +56,7 @@
     <description>Number of minutes between trash checkpoints.
   If zero, the trash feature is disabled.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
@@ -66,7 +66,7 @@
         If this is a comma-delimited list of directories then the image is
         replicated in all of the directories for redundancy.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.edits.dir</name>
@@ -77,14 +77,14 @@
         replicated in all of the directoires for redundancy.
         Default value is same as dfs.namenode.checkpoint.dir
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.period</name>
     <value>21600</value>
     <description>The number of seconds between two periodic checkpoints.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
@@ -93,7 +93,7 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connection.maxidletime</name>
@@ -101,13 +101,13 @@
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Web Interface Configuration -->
   <property>
@@ -118,7 +118,7 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
@@ -127,7 +127,7 @@
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authorization</name>
@@ -135,7 +135,7 @@
     <description>
      Enable authorization for different protocols.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
@@ -185,6 +185,6 @@ If you want to treat all principals from APACHE.ORG with /admin as "admin", your
 RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
 DEFAULT
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
index 2bee181..c1511ee 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
@@ -24,204 +24,204 @@
     <name>namenode_host</name>
     <value/>
     <description>NameNode Host.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>snamenode_host</name>
     <value/>
     <description>Secondary NameNode.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>datanode_hosts</name>
     <value/>
     <description>List of Datanode Hosts.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Data directories for Data Nodes.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_pid_dir_prefix</name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_webhdfs_enabled</name>
     <value>true</value>
     <description>WebHDFS enabled</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>NameNode Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_newsize</name>
     <value>200</value>
     <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_maxnewsize</name>
     <value>640</value>
     <description>NameNode maximum new generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_permsize</name>
     <value>128</value>
     <description>NameNode permanent generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_maxpermsize</name>
     <value>256</value>
     <description>NameNode maximum permanent generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>
     <description>Reserved space for HDFS</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_failed_volume_tolerated</name>
     <value>0</value>
     <description>DataNode volumes failure toleration</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_period</name>
     <value>21600</value>
     <description>HDFS Maximum Checkpoint Delay</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs_checkpoint_size</name>
     <value>0.5</value>
     <description>FS Checkpoint Size.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
     <description>Proxy user group.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_exclude</name>
     <value/>
     <description>HDFS Exclude hosts.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_replication</name>
     <value>3</value>
     <description>Default Block Replication.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_block_local_path_access_user</name>
     <value>hbase</value>
     <description>Default Block Replication.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_address</name>
     <value>50010</value>
     <description>Port for datanode address.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_http_address</name>
     <value>50075</value>
     <description>Port for datanode address.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir_perm</name>
     <value>750</value>
     <description>Datanode dir perms.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security_enabled</name>
     <value>false</value>
     <description>Hadoop Security</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>kerberos_domain</name>
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>kadmin_pw</name>
     <value/>
     <description>Kerberos realm admin password</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>Kerberos keytab path.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>KeyTab Directory.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_formatted_mark_dir</name>
     <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
     <description>Formatteed Mark Directory.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <description>User and Groups.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index a31a481..25de4dc 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -26,7 +26,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.client.datanode.protocol.acl</name>
@@ -36,7 +36,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.datanode.protocol.acl</name>
@@ -46,7 +46,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.inter.datanode.protocol.acl</name>
@@ -56,7 +56,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.namenode.protocol.acl</name>
@@ -66,7 +66,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.inter.tracker.protocol.acl</name>
@@ -76,7 +76,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.job.client.protocol.acl</name>
@@ -86,7 +86,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.job.task.protocol.acl</name>
@@ -96,7 +96,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.admin.operations.protocol.acl</name>
@@ -105,7 +105,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
@@ -115,7 +115,7 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.refresh.policy.protocol.acl</name>
@@ -125,6 +125,6 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 27084d2..28657eb 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -24,7 +24,7 @@
     <property-type>PASSWORD</property-type>
     <value>test</value>
     <description>1</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>test.password.empty</name>
@@ -34,7 +34,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.name.dir</name>
@@ -45,28 +45,28 @@
       of directories then the name table is replicated in all of the
       directories, for redundancy. </description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
     <description>to enable webhdfs</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
     <description>#of failed disks dn would tolerate</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.data.dir</name>
@@ -78,7 +78,7 @@
   Directories that do not exist are ignored.
   </description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.hosts.exclude</name>
@@ -87,7 +87,7 @@
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
     excluded.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   <property>
@@ -104,26 +104,26 @@
     <value>50</value>
     <description>Maximal block replication.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
@@ -134,7 +134,7 @@
         Values less than or equal to 0 mean not to start in safe mode.
         Values greater than 1 will make safe mode permanent.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
@@ -144,23 +144,23 @@
         can utilize for the balancing purpose in term of
         the number of bytes per second.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.address</name>
     <value>0.0.0.0:50010</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.http.address</name>
     <value>0.0.0.0:50075</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.blocksize</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.http-address</name>
@@ -168,7 +168,7 @@
     <description>The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.du.reserved</name>
@@ -176,7 +176,7 @@ literal string "local" or a host:port for HDFS.</description>
     <value>1073741824</value>
     <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.ipc.address</name>
@@ -185,25 +185,25 @@ literal string "local" or a host:port for HDFS.</description>
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.blockreport.initialDelay</name>
     <value>120</value>
     <description>Delay for first block report in seconds.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.handler.count</name>
     <value>40</value>
     <description>The number of server threads for the namenode.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.max.transfer.threads</name>
     <value>1024</value>
     <description>PRIVATE CONFIG VARIABLE</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Permissions configuration -->
   <property>
@@ -212,7 +212,7 @@ If the port is 0 then the server will start on a free port.
     <description>
 The octal umask used when creating files and directories.
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.permissions.enabled</name>
@@ -224,19 +224,19 @@ but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.permissions.superusergroup</name>
     <value>hdfs</value>
     <description>The name of the group of super-users.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.handler.count</name>
     <value>100</value>
     <description>Added to grow Queue size so that more client connections are allowed</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.block.access.token.enable</name>
@@ -245,7 +245,7 @@ owner or group of files or directories.
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.kerberos.principal</name>
@@ -253,7 +253,7 @@ If "false", no access tokens are checked on accessing datanodes.
     <description>
 Kerberos principal name for the NameNode
 </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.secondary.namenode.kerberos.principal</name>
@@ -261,7 +261,7 @@ Kerberos principal name for the NameNode
     <description>
         Kerberos principal name for the secondary NameNode.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
@@ -270,20 +270,20 @@ Kerberos principal name for the NameNode
     <name>dfs.namenode.kerberos.https.principal</name>
     <value/>
     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
     <value/>
     <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
     <value/>
     <description>Address of secondary namenode web server</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
@@ -293,7 +293,7 @@ Kerberos principal name for the NameNode
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPENGO specification.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
@@ -302,7 +302,7 @@ Kerberos principal name for the NameNode
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.kerberos.principal</name>
@@ -310,7 +310,7 @@ Kerberos principal name for the NameNode
     <description>
         The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.keytab.file</name>
@@ -318,7 +318,7 @@ Kerberos principal name for the NameNode
     <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
@@ -326,7 +326,7 @@ Kerberos principal name for the NameNode
     <description>
         Combined keytab file containing the namenode service and host principals.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.keytab.file</name>
@@ -334,13 +334,13 @@ Kerberos principal name for the NameNode
     <description>
         The filename of the keytab file for the DataNode.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.https-address</name>
     <value/>
     <description>The https address where namenode binds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.datanode.data.dir.perm</name>
@@ -349,7 +349,7 @@ Kerberos principal name for the NameNode
 directories. The datanode will not come up if the permissions are
 different on existing dfs.datanode.data.dir directories. If the directories
 don't exist, they will be created with this permission.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.accesstime.precision</name>
@@ -358,13 +358,13 @@ don't exist, they will be created with this permission.</description>
                  The default value is 1 hour. Setting a value of 0 disables
                  access times for HDFS.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
     <description>ACL for who all can view the default servlets in the HDFS</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
@@ -374,7 +374,7 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.write.stale.datanode</name>
@@ -384,7 +384,7 @@ don't exist, they will be created with this permission.</description>
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.write.stale.datanode.ratio</name>
@@ -392,26 +392,26 @@ don't exist, they will be created with this permission.</description>
     <description>When the ratio of number stale datanodes to total datanodes marked is greater
       than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.stale.datanode.interval</name>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
      If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- HDFS Short-Circuit Local Reads -->
   <property>
@@ -420,18 +420,18 @@ don't exist, they will be created with this permission.</description>
     <description>
       This configuration parameter turns on short-circuit local reads.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.client.read.shortcircuit.skip.checksum</name>
     <value/>
     <description>Enable/disbale skipping the checksum check</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.client.read.shortcircuit.streams.cache.size</name>
@@ -442,6 +442,6 @@ don't exist, they will be created with this permission.</description>
       more file descriptors, but potentially provide better performance on
       workloads involving lots of seeks.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
index 3318597..5a3cb6c 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HIVE/configuration/hive-site.xml
@@ -22,157 +22,157 @@ limitations under the License.
     <value>false</value>
     <description>controls whether to connect to remove metastore server or
     open a new metastore server in Hive Client JVM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value/>
     <description>JDBC connect string for a JDBC metastore</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
     <value/>
     <description>username to use against metastore database</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>javax.jdo.option.ConnectionPassword</name>
     <value/>
     <description>password to use against metastore database</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.sasl.enabled</name>
     <value/>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value/>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.kerberos.principal</name>
     <value/>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.uris</name>
     <value/>
     <description>URI for client to contact metastore server</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.clientside.fs.operations</name>
     <value>true</value>
     <description>FS operations are owned by client</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the hive client authorization</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enable the optimization about converting common
       join into mapjoin based on the input file size.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
@@ -180,12 +180,12 @@ limitations under the License.
     <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
       the criteria for sort-merge join.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
@@ -194,7 +194,7 @@ limitations under the License.
       size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join.noconditionaltask.size</name>
@@ -203,7 +203,7 @@ limitations under the License.
       is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
       converted to a mapjoin(there is no conditional task). The default is 10MB.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
@@ -212,7 +212,7 @@ limitations under the License.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.mapjoin.mapreduce</name>
@@ -222,7 +222,7 @@ limitations under the License.
       job (for e.g a group by), each map-only job is merged with the following
       map-reduce job.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.mapjoin.bucket.cache.size</name>
@@ -231,17 +231,17 @@ limitations under the License.
       Size per reducer.The default is 1G, i.e if the input size is 10G, it
       will use 10 reducers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.index.filter</name>
@@ -249,6 +249,6 @@ limitations under the License.
     <description>
     Whether to enable automatic use of indexes
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
index 5aefdeb..3629ad0 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/global.xml
@@ -24,24 +24,24 @@
     <name>hs_host</name>
     <value/>
     <description>History Server.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred_user</name>
     <value>mapred</value>
     <description>Mapreduce User</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
index 94be6ac..36b3f29 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -22,12 +22,12 @@
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- END ACLs -->
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
index a2f61d1..c4cb121 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -26,7 +26,7 @@
       The total amount of buffer memory to use while sorting files, in megabytes.
       By default, gives each merge stream 1MB, which should minimize seeks.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
@@ -38,7 +38,7 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.task.io.sort.factor</name>
@@ -47,7 +47,7 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- map/reduce properties -->
   <property>
@@ -57,7 +57,7 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.speculative</name>
@@ -66,7 +66,7 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.speculative</name>
@@ -75,7 +75,7 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.job.reduce.slowstart.completedmaps</name>
@@ -84,7 +84,7 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.merge.percent</name>
@@ -95,7 +95,7 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
@@ -104,7 +104,7 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.output.compress.codec</name>
@@ -112,7 +112,7 @@
     <description>If the map outputs are compressed, how should they be
       compressed
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
@@ -121,7 +121,7 @@
       If the job outputs are to be compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.input.buffer.percent</name>
@@ -132,13 +132,13 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- copied from kryptonite configuration -->
   <property>
     <name>mapreduce.map.output.compress</name>
     <value/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.task.timeout</name>
@@ -148,30 +148,30 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>1536</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
     <value/>
     <description>The filename of the keytab for the task tracker</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
     <value/>
     <description>The keytab for the job history server principal.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.shuffle.port</name>
@@ -181,7 +181,7 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
@@ -189,7 +189,7 @@
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
@@ -197,19 +197,19 @@
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.address</name>
     <value>localhost:10020</value>
     <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>localhost:19888</value>
     <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.framework.name</name>
@@ -218,7 +218,7 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
@@ -226,13 +226,13 @@
     <description>
       The staging dir used while submitting jobs.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.resource.mb</name>
     <value>1024</value>
     <description>The amount of memory the MR AppMaster needs.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.command-opts</name>
@@ -250,7 +250,7 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -265,13 +265,13 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.env</name>
@@ -282,17 +282,17 @@
       1) A=foo  This will set the env variable A to foo
       2) B=$B:c This will inherit the tasktracker's B env variable.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.classpath</name>
@@ -301,7 +301,7 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.am.max-attempts</name>
@@ -312,7 +312,7 @@
       set by the resourcemanager. Otherwise, it will be overridden. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.memory.mb</name>
@@ -320,7 +320,7 @@
     <description>
       Larger resource limit for maps.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.java.opts</name>
@@ -328,7 +328,7 @@
     <description>
       Larger heap-size for child jvms of maps.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.memory.mb</name>
@@ -336,7 +336,7 @@
     <description>
       Larger resource limit for reduces.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.java.opts</name>
@@ -344,7 +344,7 @@
     <description>
       Larger heap-size for child jvms of reduces.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.log.level</name>
@@ -353,7 +353,7 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.log.level</name>
@@ -362,7 +362,7 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.user.env</name>
@@ -372,6 +372,6 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc.).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 20a73d6..6e96f59 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -25,7 +25,7 @@
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.system.id</name>
@@ -33,7 +33,7 @@
     <description>
     The Oozie system ID.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.systemmode</name>
@@ -41,7 +41,7 @@
     <description>
      System mode for Oozie at startup.
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.AuthorizationService.security.enabled</name>
@@ -50,7 +50,7 @@
      Specifies whether security (user name/admin role) is enabled or not.
      If disabled, any user can manage the Oozie system and manage any job.
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.PurgeService.older.than</name>
@@ -58,7 +58,7 @@
     <description>
      Jobs older than this value, in days, will be purged by the PurgeService.
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.PurgeService.purge.interval</name>
@@ -66,19 +66,19 @@
     <description>
      Interval at which the purge service will run, in seconds.
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.CallableQueueService.queue.size</name>
     <value>1000</value>
     <description>Max callable queue size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.CallableQueueService.threads</name>
     <value>10</value>
     <description>Number of threads used for executing callables</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.CallableQueueService.callable.concurrency</name>
@@ -90,14 +90,14 @@
      All commands that use action executors (action-start, action-end, action-kill and action-check) use
      the action type as the callable type.
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.coord.normal.default.timeout</name>
     <value>120</value>
     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
       -1 means infinite timeout</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.db.schema.name</name>
@@ -105,7 +105,7 @@
     <description>
       Oozie DataBase Name
      </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
@@ -113,21 +113,21 @@
     <description>
       Whitelisted job tracker for Oozie service.
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.authentication.type</name>
     <value>simple</value>
     <description>
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
     <value> </value>
     <description>
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.WorkflowAppService.system.libpath</name>
@@ -137,7 +137,7 @@
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
@@ -148,7 +148,7 @@
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.authentication.kerberos.name.rules</name>
@@ -160,7 +160,7 @@
 
         </value>
     <description>The mapping from kerberos principal names to local OS user names.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
@@ -173,7 +173,7 @@
           the Oozie configuration directory; though the path can be absolute (i.e. to point
          to Hadoop client conf/ directories in the local filesystem).
       </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.ActionService.executor.ext.classes</name>
@@ -184,12 +184,12 @@
             org.apache.oozie.action.hadoop.SqoopActionExecutor,
             org.apache.oozie.action.hadoop.DistcpActionExecutor
         </value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.SchemaService.wf.ext.schemas</name>
     <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.create.db.schema</name>
@@ -200,7 +200,7 @@
            If set to true, it creates the DB schema if it does not exist. If the DB schema already exists, this is a NOP.
             If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.driver</name>
@@ -208,7 +208,7 @@
     <description>
             JDBC driver class.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.url</name>
@@ -216,7 +216,7 @@
     <description>
             JDBC URL.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.username</name>
@@ -224,7 +224,7 @@
     <description>
             DB user name.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.jdbc.password</name>
@@ -235,7 +235,7 @@
            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
                       and if it is empty, Configuration assumes it is NULL.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>oozie.service.JPAService.pool.max.active.conn</name>
@@ -243,6 +243,6 @@
     <description>
              Max number of connections.
         </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
index db1f15d..ef8cd9a 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/capacity-scheduler.xml
@@ -22,7 +22,7 @@
     <description>
       Maximum number of applications that can be pending and running.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
@@ -32,7 +32,7 @@
       application masters, i.e. it controls the number of concurrently running
       applications.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
@@ -40,7 +40,7 @@
     <description>
       The queues at this level (root is the root queue).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.capacity</name>
@@ -51,13 +51,13 @@
       The child queues' capacity should add up to their parent queue's capacity
       or less.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
@@ -65,7 +65,7 @@
     <description>
       Default queue user limit, a percentage from 0.0 to 1.0.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
@@ -73,7 +73,7 @@
     <description>
       The maximum capacity of the default queue. 
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.state</name>
@@ -81,7 +81,7 @@
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
@@ -89,7 +89,7 @@
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
@@ -97,7 +97,7 @@
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
@@ -106,6 +106,6 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d31dcfc/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
index 9739bc4..0aade53 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/YARN/configuration/global.xml
@@ -24,48 +24,48 @@
     <name>rm_host</name>
     <value/>
     <description>ResourceManager.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>nm_hosts</name>
     <value/>
     <description>List of NodeManager Hosts.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_user</name>
     <value>yarn</value>
     <description>YARN User</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>