Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/09 14:19:49 UTC

[29/94] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation (dlysnichenko)
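
Every hunk in this commit applies the same mechanical change to each stack configuration property: the old pair of upgrade tags is replaced by a single on-ambari-upgrade tag. A representative before/after, taken from the hunks below (the new attributes presumably mean the property is added if missing on Ambari upgrade, but never updated or deleted):

Before:
    <on-ambari-upgrade add="false" change="true" delete="true"/>
    <on-stack-upgrade add="true" change="true" delete="false"/>

After:
    <on-ambari-upgrade add="true" update="false" delete="false"/>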

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
index cfa9c76..495a46f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
@@ -27,8 +27,7 @@
        for the java.library.path value. java.library.path tells the JVM where
        to look for native libraries. It is necessary to set this config correctly since
        Storm uses the ZeroMQ and JZMQ native libs. </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.local.dir</name>
@@ -36,71 +35,61 @@
     <description>A directory on the local filesystem used by Storm for any local
        filesystem usage it needs. The directory must exist and the Storm daemons must
        have permission to read/write from this location.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.servers</name>
     <value>['localhost']</value>
     <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.port</name>
     <value>2181</value>
     <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.root</name>
     <value>/storm</value>
     <description>The root location at which Storm stores data in ZooKeeper.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.session.timeout</name>
     <value>20000</value>
     <description>The session timeout for clients to ZooKeeper.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.connection.timeout</name>
     <value>15000</value>
     <description>The connection timeout for clients to ZooKeeper.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.times</name>
     <value>5</value>
     <description>The number of times to retry a Zookeeper operation.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.interval</name>
     <value>1000</value>
     <description>The interval between retries of a Zookeeper operation.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.intervalceiling.millis</name>
     <value>30000</value>
     <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.cluster.mode</name>
     <value>distributed</value>
     <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.local.mode.zmq</name>
@@ -110,65 +99,56 @@
        of this flag is to make it easy to run Storm in local mode by eliminating
        the need for native dependencies, which can be difficult to install.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.thrift.transport</name>
     <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
     <description>The transport plug-in for Thrift client/server communication.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.transport</name>
     <value>backtype.storm.messaging.netty.Context</value>
     <description>The transporter for communication among Storm tasks.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.host</name>
     <value>localhost</value>
     <description>The host that the master server is running on.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.thrift.port</name>
     <value>6627</value>
     <description> Which port the Thrift interface of Nimbus should run on. Clients should
        connect to this port to upload jars and submit topologies.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.thrift.max_buffer_size</name>
     <value>1048576</value>
     <description>The maximum buffer size thrift should use when reading messages.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.childopts</name>
     <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
     <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.task.timeout.secs</name>
     <value>30</value>
     <description>How long without heartbeating a task can go before nimbus will consider the task dead and reassign it to another location.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.supervisor.timeout.secs</name>
     <value>60</value>
     <description>How long before a supervisor can go without heartbeating before nimbus considers it dead and stops assigning new work to it.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.monitor.freq.secs</name>
@@ -178,15 +158,13 @@
        that if a machine ever goes down Nimbus will immediately wake up and take action.
        This parameter is for checking for failures when there's no explicit event like that occuring.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.cleanup.inbox.freq.secs</name>
     <value>600</value>
     <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.inbox.jar.expiration.secs</name>
@@ -198,32 +176,28 @@
        Note that the time it takes to delete an inbox jar file is going to be somewhat more than
        NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
       </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.task.launch.secs</name>
     <value>120</value>
     <description>A special timeout used when a task is initially launched. During launch, this is the timeout
        used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.reassign</name>
     <value>true</value>
     <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
        Defaults to true, and it's not recommended to change this value.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.file.copy.expiration.secs</name>
     <value>600</value>
     <description>During upload/download with the master, how long an upload or download connection is idle
        before nimbus considers it dead and drops the connection.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.topology.validator</name>
@@ -231,71 +205,61 @@
     <description>A custom class that implements ITopologyValidator that is run whenever a
        topology is submitted. Can be used to provide business-specific logic for
        whether topologies are allowed to run or not.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ui.port</name>
     <value>8744</value>
     <description>Storm UI binds to this port.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ui.childopts</name>
     <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
     <description>Childopts for Storm UI Java process.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.port</name>
     <value>8000</value>
     <description>HTTP UI port for log viewer.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.childopts</name>
     <value>-Xmx128m</value>
     <description>Childopts for log viewer java process.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.appender.name</name>
     <value>A1</value>
     <description>Appender name used by log viewer to determine log directory.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.port</name>
     <value>3772</value>
     <description>This port is used by Storm DRPC for receiving DPRC requests from clients.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.worker.threads</name>
     <value>64</value>
     <description>DRPC thrift server worker threads.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.queue.size</name>
     <value>128</value>
     <description>DRPC thrift server queue size.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.invocations.port</name>
     <value>3773</value>
     <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.request.timeout.secs</name>
@@ -303,38 +267,33 @@
     <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
        timeout based on the socket timeout on the DRPC client, and separately based on the topology message
        timeout for the topology implementing the DRPC function.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>drpc.childopts</name>
     <value>-Xmx768m</value>
     <description>Childopts for Storm DRPC Java process.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.root</name>
     <value>/transactional</value>
     <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.servers</name>
     <value>null</value>
     <description>The list of zookeeper servers in which to keep the transactional state. If null (which is default),
        will use storm.zookeeper.servers</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.port</name>
     <value>null</value>
     <description>The port to use to connect to the transactional zookeeper servers. If null (which is default),
        will use storm.zookeeper.port</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.slots.ports</name>
@@ -342,15 +301,13 @@
     <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
        the supervisor will only run one worker per port. Use this configuration to tune
        how many workers run on each machine.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.childopts</name>
     <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
     <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.worker.start.timeout.secs</name>
@@ -359,50 +316,43 @@
        the supervisor tries to restart the worker process. This value override
        supervisor.worker.timeout.secs during launch because there is additional
        overhead to starting and configuring the JVM on launch.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.worker.timeout.secs</name>
     <value>30</value>
     <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.monitor.frequency.secs</name>
     <value>3</value>
     <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.heartbeat.frequency.secs</name>
     <value>5</value>
     <description>How often the supervisor sends a heartbeat to the master.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>worker.childopts</name>
     <value>-Xmx768m -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
     <description>The jvm opts provided to workers launched by this supervisor. All \"%ID%\" substrings are replaced with an identifier for this worker.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>worker.heartbeat.frequency.secs</name>
     <value>1</value>
     <description>How often this worker should heartbeat to the supervisor.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>task.heartbeat.frequency.secs</name>
     <value>3</value>
     <description>How often a task should heartbeat its status to the master.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>task.refresh.poll.secs</name>
@@ -412,15 +362,13 @@
        In general though, when a reassignment happens other tasks will be notified
        almost immediately. This configuration is here just in case that notification doesn't
        come through.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>zmq.threads</name>
     <value>1</value>
     <description>The number of threads that should be used by the zeromq context in each worker process.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>zmq.linger.millis</name>
@@ -428,80 +376,69 @@
     <description>How long a connection should retry sending messages to a target host when
        the connection is closed. This is an advanced configuration and can almost
        certainly be ignored.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>zmq.hwm</name>
     <value>0</value>
     <description>The high water for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
        on the networking layer.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.server_worker_threads</name>
     <value>1</value>
     <description>Netty based messaging: The # of worker threads for the server.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.client_worker_threads</name>
     <value>1</value>
     <description>Netty based messaging: The # of worker threads for the client.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.buffer_size</name>
     <value>5242880</value>
     <description>Netty based messaging: The buffer size for send/recv buffer.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.max_retries</name>
     <value>30</value>
     <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.max_wait_ms</name>
     <value>1000</value>
     <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.min_wait_ms</name>
     <value>100</value>
     <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.enable.message.timeouts</name>
     <value>true</value>
     <description>True if Storm should timeout messages or not. Defaults to true. This is meant to be used
        in unit tests to prevent tuples from being accidentally timed out during the test.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.debug</name>
     <value>false</value>
     <description>When set to true, Storm will log every message that's emitted.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.optimize</name>
     <value>true</value>
     <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.workers</name>
@@ -510,8 +447,7 @@
        topology. Each process will execute some number of tasks as threads within
        them. This parameter should be used in conjunction with the parallelism hints
        on each component in the topology to tune the performance of a topology.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.acker.executors</name>
@@ -521,8 +457,7 @@
       If this is set to 0, then Storm will immediately ack tuples as soon
        as they come off the spout, effectively disabling reliability.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.message.timeout.secs</name>
@@ -531,8 +466,7 @@
        emitted by a spout. If the message is not acked within this time frame, Storm
        will fail the message on the spout. Some spouts implementations will then replay
        the message at a later time.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.skip.missing.kryo.registrations</name>
@@ -545,16 +479,14 @@
        a single application may not have the code for the other serializers used by other apps.
        By setting this config to true, Storm will ignore that it doesn't have those other serializations
        rather than throw an error.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.task.parallelism</name>
     <value>null</value>
     <description>The maximum parallelism allowed for a component in this topology. This configuration is
        typically used in testing to limit the number of threads spawned in local mode.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.spout.pending</name>
@@ -565,135 +497,117 @@
        A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
        Note that this config parameter has no effect for unreliable spouts that don't tag
        their tuples with a message id.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.state.synchronization.timeout.secs</name>
     <value>60</value>
     <description>The maximum amount of time a component gives a source of state to synchronize before it requests
        synchronization again.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.stats.sample.rate</name>
     <value>0.05</value>
     <description>The percentage of tuples to sample to produce stats for a task.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.builtin.metrics.bucket.size.secs</name>
     <value>60</value>
     <description>The time period that builtin metrics data in bucketed into.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.fall.back.on.java.serialization</name>
     <value>true</value>
     <description>Whether or not to use Java serialization in a topology.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.childopts</name>
     <value>null</value>
     <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.receive.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.send.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.receiver.buffer.size</name>
     <value>8</value>
     <description>The maximum number of messages to batch from the thread receiving off the network to the
        executor queues. Must be a power of 2.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.transfer.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor transfer queue for each worker.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.tick.tuple.freq.secs</name>
     <value>null</value>
     <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
        to tasks. Meant to be used as a component-specific configuration.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.shared.thread.pool.size</name>
     <value>4</value>
     <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
        via the TopologyContext.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.disruptor.wait.strategy</name>
     <value>com.lmax.disruptor.BlockingWaitStrategy</value>
     <description>Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
        vs. throughput.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.send.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.receiver.buffer.size</name>
     <value>8</value>
     <description>The maximum number of messages to batch from the thread receiving off the network to the
        executor queues. Must be a power of 2.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.transfer.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor transfer queue for each worker.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.tick.tuple.freq.secs</name>
     <value>null</value>
     <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
        to tasks. Meant to be used as a component-specific configuration.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.shared.thread.pool.size</name>
     <value>4</value>
     <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
        via the TopologyContext.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.spout.wait.strategy</name>
@@ -703,15 +617,13 @@
 
        1. nextTuple emits no tuples
        2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.sleep.spout.wait.strategy.time.ms</name>
     <value>1</value>
     <description>The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.error.throttle.interval.secs</name>
@@ -719,8 +631,7 @@
     <description>The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
        an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
        reported to Zookeeper per task for every 10 second interval of time.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.error.report.per.interval</name>
@@ -728,8 +639,7 @@
     <description>The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
        an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
        reported to Zookeeper per task for every 10 second interval of time.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.kryo.factory</name>
@@ -737,23 +647,20 @@
     <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
        topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
        implements topology.fall.back.on.java.serialization and turns references off.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.tuple.serializer</name>
     <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
     <description>The serializer class for ListDelegate (tuple payload).
        The default serializer will be ListDelegateSerializer</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>topology.trident.batch.emit.interval.millis</name>
     <value>500</value>
     <description>How often a batch can be emitted in a Trident topology.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>dev.zookeeper.path</name>
@@ -761,7 +668,6 @@
     <description>The path to use as the zookeeper dir when running a zookeeper server via
        "storm dev-zookeeper". This zookeeper instance is only intended for development;
        it is not a production grade zookeeper setup.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
index 279db29..dbe1a2e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
@@ -25,7 +25,6 @@
     <display-name>Tez User</display-name>
     <value>tez</value>
     <description/>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
index 6b54b11..2f88480 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
@@ -21,37 +21,32 @@
     <name>tez.lib.uris</name>
     <value>glusterfs:///apps/tez/,glusterfs:///apps/tez/lib/</value>
     <description>The location of the Tez libraries which will be localized for DAGs</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.log.level</name>
     <value>INFO</value>
     <description>Root Logging level passed to the Tez app master</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.staging-dir</name>
     <value>/tmp/${user.name}/staging</value>
     <description>The staging dir used while submitting DAGs</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.resource.memory.mb</name>
     <value>1536</value>
     <description>The amount of memory to be used by the AppMaster</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <!-- tez picks the java opts from yarn.app.mapreduce.am.command-opts for MR tasks. Likewise for the AM memory MB -->
   <property>
     <name>tez.am.java.opts</name>
     <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC</value>
     <description>Java options for the Tez AppMaster process. The -Xmx parameter value is generally 0.8 times tez.am.resource.memory.mb config.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.shuffle-vertex-manager.min-src-fraction</name>
@@ -59,8 +54,7 @@
     <description>In case of a ScatterGather connection, the fraction of source tasks which should
       complete before tasks for the current vertex are schedule
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.shuffle-vertex-manager.max-src-fraction</name>
@@ -69,15 +63,13 @@
       completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
       scheduling on the current vertex scales linearly between min-fraction and max-fraction
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
     <value>250</value>
     <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.grouping.split-waves</name>
@@ -86,8 +78,7 @@
       a Vertex. 1.4 with 100% queue available implies generating a number of tasks roughly equal
       to 140% of the available containers on the queue
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.grouping.min-size</name>
@@ -95,8 +86,7 @@
     <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
       too many splits
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.grouping.max-size</name>
@@ -104,31 +94,27 @@
     <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
       excessively large split
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.container.reuse.enabled</name>
     <value>true</value>
     <description>Configuration to specify whether container should be reused</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.container.reuse.rack-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.container.reuse.non-local-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.container.session.delay-allocation-millis</name>
@@ -138,8 +124,7 @@
       it immediately. Only active when reuse is enabled. Set to -1 to never release a container
       in a session
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
@@ -147,8 +132,7 @@
     <description>The amount of time to wait before assigning a container to the next level of
       locality. NODE -&gt; RACK -&gt; NON_LOCAL
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.task.get-task.sleep.interval-ms.max</name>
@@ -156,8 +140,7 @@
     <description>The maximum amount of time, in seconds, to wait before a task asks an AM for
       another task
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.am.env</name>
@@ -166,8 +149,7 @@
         Additional execution environment entries for tez. This is not an additive property. You must preserve the original value if
         you want to have access to native libraries.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <!-- Client Submission timeout value when submitting DAGs to a session -->
   <property>
@@ -176,8 +158,7 @@
     <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
       the client
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.session.am.dag.submit.timeout.secs</name>
@@ -185,8 +166,7 @@
     <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
       before shutting down
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <!-- Configuration for runtime components -->
   <!-- These properties can be set on a per edge basis by configuring the payload for each
@@ -195,8 +175,7 @@
     <name>tez.runtime.intermediate-output.should-compress</name>
     <value>false</value>
     <description>Whether intermediate output should be compressed or not</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.runtime.intermediate-output.compress.codec</name>
@@ -204,30 +183,26 @@
     <description>The coded to be used if compressing intermediate output. Only
       applicable if tez.runtime.intermediate-output.should-compress is enabled.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.runtime.intermediate-input.is-compressed</name>
     <value>false</value>
     <description>Whether intermediate input is compressed</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>tez.runtime.intermediate-input.compress.codec</name>
     <value>org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>The coded to be used when reading intermediate compressed input.
     Only applicable if tez.runtime.intermediate-input.is-compressed is enabled.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <!-- Configuration for ATS integration -->
   <property>
     <name>tez.yarn.ats.enabled</name>
     <value>true</value>
     <description>Whether to send history events to YARN Application Timeline Server</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>
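
For reference, each property in these stack definition XMLs now carries a single on-ambari-upgrade element in place of the previous on-ambari-upgrade/on-stack-upgrade pair. A minimal sketch of the resulting property shape (the name, value and description below are placeholders, not taken from any stack file):

  <property>
    <name>example.property.name</name>
    <value>example-value</value>
    <description>Placeholder property illustrating the single upgrade-metadata element.</description>
    <on-ambari-upgrade add="true" update="false" delete="false"/>
  </property>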

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
index 0544cf3..e8a4a55 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
@@ -25,8 +25,7 @@
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
@@ -34,8 +33,7 @@
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
@@ -43,55 +41,46 @@
     <description>
        The staging dir used while submitting jobs.
      </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.healthChecker.script.path</name>
     <value>glusterfs:///mapred/jobstatus</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.job.tracker.history.completed.location</name>
     <value>glusterfs:///mapred/history/done</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.system.dir</name>
     <value>glusterfs:///mapred/system</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapreduce.jobtracker.staging.root.dir</name>
     <value>glusterfs:///user</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.healthChecker.script.path</name>
     <value>glusterfs:///mapred/jobstatus</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.job.tracker.history.completed.location</name>
     <value>glusterfs:///mapred/history/done</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapred.system.dir</name>
     <value>glusterfs:///mapred/system</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>mapreduce.jobtracker.staging.root.dir</name>
     <value>glusterfs:///user</value>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
index f198565..30db7ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
@@ -21,15 +21,13 @@
     <name>ssl.client.truststore.location</name>
     <value>/etc/security/clientKeys/all.jks</value>
     <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.password</name>
@@ -39,29 +37,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.reload.interval</name>
     <value>10000</value>
     <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.location</name>
     <value>/etc/security/clientKeys/keystore.jks</value>
     <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.password</name>
@@ -71,7 +65,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
index 176efaa..361d8d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
@@ -21,15 +21,13 @@
     <name>ssl.server.truststore.location</name>
     <value>/etc/security/serverKeys/all.jks</value>
     <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.password</name>
@@ -39,29 +37,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.reload.interval</name>
     <value>10000</value>
     <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.location</name>
     <value>/etc/security/serverKeys/keystore.jks</value>
     <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.password</name>
@@ -71,8 +65,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.keypassword</name>
@@ -82,7 +75,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
index 6f597a2..9fdbed3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
@@ -22,8 +22,7 @@
     <description>
       Maximum number of applications that can be pending and running.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
@@ -33,8 +32,7 @@
       application masters i.e. controls number of concurrent running
       applications.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
@@ -42,8 +40,7 @@
     <description>
       The queues at this level (root is the root queue).
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.capacity</name>
@@ -54,15 +51,13 @@
       The capacities of the child queues should add up to at most their parent
       queue's capacity.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
@@ -70,8 +65,7 @@
     <description>
       Default queue user limit, a percentage from 0.0 to 1.0.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
@@ -79,8 +73,7 @@
     <description>
       The maximum capacity of the default queue. 
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.state</name>
@@ -88,8 +81,7 @@
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
@@ -97,8 +89,7 @@
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
@@ -106,8 +97,7 @@
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
@@ -116,8 +106,7 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.node-locality-delay</name>
@@ -128,8 +117,7 @@
       Typically this should be set to the number of nodes in the cluster. By default it is set to
       approximately the number of nodes in one rack, which is 40.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
@@ -137,7 +125,6 @@
     <description>
       Default minimum queue resource limit depends on the number of users who have submitted applications.
     </description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>
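
As the root.capacity description above notes, the child queues' capacities should add up to no more than their parent queue's. A hypothetical layout with two leaf queues under root (the "analytics" queue name and the 70/30 split are illustrative only, not part of this stack definition):

  <property>
    <name>yarn.scheduler.capacity.root.queues</name>
    <value>default,analytics</value>
  </property>
  <property>
    <name>yarn.scheduler.capacity.root.default.capacity</name>
    <value>70</value>
  </property>
  <property>
    <name>yarn.scheduler.capacity.root.analytics.capacity</name>
    <value>30</value>
  </property>

Here 70 + 30 = 100, matching the root queue's own capacity of 100.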

http://git-wip-us.apache.org/repos/asf/ambari/blob/a998371a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
index f95cf6b..aaae5df 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
@@ -24,58 +24,50 @@
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn_user</name>
     <display-name>Yarn User</display-name>
     <value>yarn</value>
     <description>YARN User</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>apptimelineserver_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NameNode using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
   <!-- yarn-env.sh -->
   <property>
@@ -197,7 +189,6 @@ YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="false" change="true" delete="true"/>
-    <on-stack-upgrade add="true" change="true" delete="false"/>
+    <on-ambari-upgrade add="true" update="false" delete="false"/>
   </property>
 </configuration>
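
The *_heapsize values above are plain numbers interpreted as megabytes. A hypothetical site-level override raising the ResourceManager heap (the 2048 figure is illustrative only) keeps the same property shape:

  <property>
    <name>resourcemanager_heapsize</name>
    <value>2048</value>
    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
    <on-ambari-upgrade add="true" update="false" delete="false"/>
  </property>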