Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/01 15:27:01 UTC

[78/94] ambari git commit: AMBARI-16272. Ambari Upgrade shouldn't automatically add stack configs (dlysnichenko)
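For reference, every hunk in this patch applies the same pattern: each <property> element in the stack configuration XML gains two new child elements declaring how the property should be handled during an Ambari upgrade versus a stack upgrade. A minimal sketch of the resulting structure follows (the property name, value, and description are illustrative only, and the add/change/delete flags presumably control whether Ambari may automatically add, change, or delete the property during that upgrade type):

  <property>
    <name>example.property.name</name>
    <value>example-value</value>
    <description>Illustrative property only.</description>
    <!-- add="true": the property may be added automatically during an Ambari upgrade -->
    <on-ambari-upgrade add="true" change="false" delete="false"/>
    <!-- the same flags, applied during a stack upgrade -->
    <on-stack-upgrade add="true" change="false" delete="false"/>
  </property>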

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
index 01b11dd..f4b55dd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/configuration/hive-site.xml
@@ -16,126 +16,141 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-
 <configuration supports_final="true">
-
   <property>
     <name>hive.heapsize</name>
     <value>1024</value>
     <description>Hive Java heap size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>ambari.hive.db.schema.name</name>
     <value>hive</value>
     <description>Database name used as the Hive Metastore</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
     <description>JDBC connect string for a JDBC metastore</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
     <value>hive</value>
     <description>username to use against metastore database</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property require-input="true">
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
+    <value/>
     <property-type>PASSWORD</property-type>
     <description>password to use against metastore database</description>
     <value-attributes>
       <type>password</type>
     </value-attributes>
-
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.sasl.enabled</name>
     <value>false</value>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value>/etc/security/keytabs/hive.service.keytab</value>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.principal</name>
     <value>hive/_HOST@EXAMPLE.COM</value>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.uris</name>
     <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>enable or disable the hive client authorization</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.metastore.authorization.manager</name>
     <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
     <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authenticator.manager</name>
     <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
     <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
@@ -143,44 +158,51 @@ limitations under the License.
       submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
       process runs as.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
     <description>Disable HDFS filesystem cache.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
     <description>Disable local filesystem cache.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.enforce.sortmergebucketmapjoin</name>
     <value>true</value>
     <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
@@ -188,43 +210,49 @@ limitations under the License.
       is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
       this parameter as true.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>false</value>
     <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
     of buckets, a sort-merge join can be performed by setting this parameter as true.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enable the optimization about converting common
       join into mapjoin based on the input file size.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
     <value>true</value>
     <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
       the criteria for sort-merge join.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
     <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
     <value>true</value>
@@ -232,8 +260,9 @@ limitations under the License.
       size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.auto.convert.join.noconditionaltask.size</name>
     <value>1000000000</value>
@@ -241,8 +270,9 @@ limitations under the License.
       is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
       converted to a mapjoin(there is no conditional task). The default is 10MB.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
     <value>4</value>
@@ -250,8 +280,9 @@ limitations under the License.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.mapjoin.mapreduce</name>
     <value>true</value>
@@ -260,8 +291,9 @@ limitations under the License.
       job (for e.g a group by), each map-only job is merged with the following
       map-reduce job.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.mapjoin.bucket.cache.size</name>
     <value>10000</value>
@@ -269,98 +301,113 @@ limitations under the License.
       Size per reducer.The default is 1G, i.e if the input size is 10G, it
       will use 10 reducers.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>true</value>
     <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
     <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.optimize.index.filter</name>
     <value>true</value>
     <description>
     Whether to enable automatic use of indexes
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.execution.engine</name>
     <value>mr</value>
     <description>Whether to use MR or Tez</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.exec.post.hooks</name>
     <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
     <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.exec.pre.hooks</name>
     <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
     <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.exec.failure.hooks</name>
     <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
     <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.groupby.maxentries</name>
     <value>100000</value>
     <description>Max number of entries in the vector group by aggregation hashtables.
       Exceeding this will trigger a flush irrelevant of memory pressure condition.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.groupby.checkinterval</name>
     <value>1024</value>
     <description>Number of entries added to the group by aggregation hash before a reocmputation of average entry size is performed.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.vectorized.groupby.flush.percent</name>
     <value>0.1</value>
     <description>Percent of entries in the group by aggregation hash flushed when the memory treshold is exceeded.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.stats.autogather</name>
     <value>true</value>
     <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.tez.container.size</name>
     <value>682</value>
     <description>By default, Tez uses the java options from map tasks. Use this property to override that value. Assigned value must match value specified for mapreduce.map.child.java.opts.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.tez.input.format</name>
     <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
     <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.tez.java.opts</name>
     <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC</value>
     <description>Java command line options for Tez. Must be assigned the same value as mapreduce.map.child.java.opts.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compute.query.using.stats</name>
     <value>true</value>
@@ -369,8 +416,9 @@ limitations under the License.
       stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
       For more advanced stats collection need to run analyze table queries.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.orc.splits.include.file.footer</name>
     <value>false</value>
@@ -378,112 +426,130 @@ limitations under the License.
       If turned on splits generated by orc will include metadata about the stripes in the file. This
       data is read remotely (from the client or HS2 machine) and sent to all the tasks.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.limit.optimize.enable</name>
     <value>true</value>
     <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.limit.pushdown.memory.usage</name>
     <value>0.04</value>
     <description>The max memory to be used for hash in RS operator for top K selection.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.tez.default.queues</name>
     <value>default</value>
     <description>A comma-separated list of queues configured for the cluster.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.tez.sessions.per.default.queue</name>
     <value>1</value>
     <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.tez.initialize.default.sessions</name>
     <value>false</value>
     <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially may want to run queries with Tez without a pool of sessions.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.txn.manager</name>
     <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
     <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.txn.timeout</name>
     <value>300</value>
     <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.txn.max.open.batch</name>
     <value>1000</value>
     <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.initiator.on</name>
     <value>false</value>
     <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.worker.threads</name>
     <value>0</value>
     <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.worker.timeout</name>
     <value>86400L</value>
     <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.check.interval</name>
     <value>300L</value>
     <description>Time in seconds between checks to see if any partitions need compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.delta.num.threshold</name>
     <value>10</value>
     <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.delta.pct.threshold</name>
     <value>0.1f</value>
     <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.compactor.abortedtxn.threshold</name>
     <value>1000</value>
     <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>datanucleus.cache.level2.type</name>
     <value>none</value>
     <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.thrift.port</name>
     <value>10000</value>
     <description>
       TCP port number to listen on, default 10000.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.support.dynamic.service.discovery</name>
     <value>false</value>
@@ -493,14 +559,16 @@ limitations under the License.
       should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
       connection string.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.zookeeper.namespace</name>
     <value>hiveserver2</value>
     <description>The parent node in ZooKeeper used by HiveServer2 when
       supporting dynamic service discovery.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/configuration/oozie-site.xml
index 2ce2f2d..b771ad6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/OOZIE/configuration/oozie-site.xml
@@ -16,9 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-
 <configuration supports_final="true">
-
   <!--
       Refer to the oozie-default.xml file for the complete list of
       Oozie configuration properties and their default values.
@@ -27,24 +25,27 @@
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
       The Oozie system ID.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.systemmode</name>
     <value>NORMAL</value>
     <description>
       System mode for Oozie at startup.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.AuthorizationService.security.enabled</name>
     <value>true</value>
@@ -52,36 +53,41 @@
       Specifies whether security (user name/admin role) is enabled or not.
       If disabled any user can manage Oozie system and manage any job.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.PurgeService.older.than</name>
     <value>30</value>
     <description>
       Jobs older than this value, in days, will be purged by the PurgeService.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.PurgeService.purge.interval</name>
     <value>3600</value>
     <description>
       Interval at which the purge service will run, in seconds.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.CallableQueueService.queue.size</name>
     <value>1000</value>
     <description>Max callable queue size</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.CallableQueueService.threads</name>
     <value>10</value>
     <description>Number of threads used for executing callables</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.CallableQueueService.callable.concurrency</name>
     <value>3</value>
@@ -92,24 +98,27 @@
       All commands that use action executors (action-start, action-end, action-kill and action-check) use
       the action type as the callable type.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.coord.normal.default.timeout</name>
     <value>120</value>
     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
       -1 means infinite timeout
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.db.schema.name</name>
     <value>oozie</value>
     <description>
       Oozie DataBase Name
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.authentication.type</name>
     <value>simple</value>
@@ -117,8 +126,9 @@
       Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
       #AUTHENTICATION_HANDLER_CLASSNAME#.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.WorkflowAppService.system.libpath</name>
     <value>/user/${user.name}/share/lib</value>
@@ -127,8 +137,9 @@
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
     <value>false</value>
@@ -138,6 +149,8 @@
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.authentication.kerberos.name.rules</name>
@@ -149,6 +162,8 @@
 
     </value>
     <description>The mapping from kerberos principal names to local OS user names.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
@@ -161,6 +176,8 @@
       the Oozie configuration directory; though the path can be absolute (i.e. to point
       to Hadoop client conf/ directories in the local filesystem.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.service.ActionService.executor.ext.classes</name>
@@ -176,8 +193,9 @@
       be used in workflows. This property is a convenience property to add extensions to the built in executors without
       having to include all the built in ones.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.SchemaService.wf.ext.schemas</name>
     <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
@@ -185,6 +203,8 @@
       Schemas for additional actions types. IMPORTANT: if there are no schemas leave a 1 space string, the service
       trims the value, if empty Configuration assumes it is NULL.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.service.JPAService.create.db.schema</name>
@@ -195,32 +215,36 @@
       If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
       If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.JPAService.jdbc.driver</name>
     <value>org.apache.derby.jdbc.EmbeddedDriver</value>
     <description>
       JDBC driver class.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.JPAService.jdbc.url</name>
     <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
     <description>
       JDBC URL.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.JPAService.jdbc.username</name>
     <value>oozie</value>
     <description>
       Database user name to use to connect to the database
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.JPAService.jdbc.password</name>
     <value> </value>
@@ -230,16 +254,18 @@
       IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
       if empty Configuration assumes it is NULL.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.service.JPAService.pool.max.active.conn</name>
     <value>10</value>
     <description>
       Max number of connections.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
   <property>
     <name>oozie.services</name>
     <value>
@@ -278,6 +304,8 @@
       org.apache.oozie.service.JobsConcurrencyService
     </value>
     <description>List of Oozie services</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.service.URIHandlerService.uri.handlers</name>
@@ -285,6 +313,8 @@
     <description>
       Enlist the different uri handlers supported for data availability checks.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.services.ext</name>
@@ -294,6 +324,8 @@
       To add/replace services defined in 'oozie.services' with custom implementations.
       Class names must be separated by commas.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.service.coord.push.check.requeue.interval</name>
@@ -301,6 +333,8 @@
     <description>
       Command re-queue interval for push dependencies (in millisecond).
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.credentials.credentialclasses</name>
@@ -308,6 +342,7 @@
     <description>
       Credential Class to be used for HCat.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/PIG/configuration/pig-properties.xml
index 8c0c400..c86bd2e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/PIG/configuration/pig-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/PIG/configuration/pig-properties.xml
@@ -19,9 +19,7 @@
  * limitations under the License.
  */
 -->
-
 <configuration supports_final="false" supports_adding_forbidden="true">
-
   <property>
     <name>content</name>
     <display-name>pig-properties template</display-name>
@@ -88,8 +86,9 @@ hcat.bin=/usr/bin/hcat
 
     </value>
     <value-attributes>
-        <type>content</type>
+      <type>content</type>
     </value-attributes>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
index bad9a07..38fdfd5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-env.xml
@@ -19,22 +19,27 @@
  * limitations under the License.
  */
 -->
-
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>storm_user</name>
     <display-name>Storm User</display-name>
     <value>storm</value>
-    <description></description>
+    <description/>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm_log_dir</name>
     <value>/var/log/storm</value>
-    <description></description>
+    <description/>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm_pid_dir</name>
     <value>/var/run/storm</value>
-    <description></description>
+    <description/>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
index 73ea7ca..85689dc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/STORM/configuration/storm-site.xml
@@ -19,7 +19,6 @@
  * limitations under the License.
  */
 -->
-
 <configuration supports_final="true">
   <property>
     <name>java.library.path</name>
@@ -28,6 +27,8 @@
        for the java.library.path value. java.library.path tells the JVM where
        to look for native libraries. It is necessary to set this config correctly since
        Storm uses the ZeroMQ and JZMQ native libs. </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.local.dir</name>
@@ -35,51 +36,71 @@
     <description>A directory on the local filesystem used by Storm for any local
        filesystem usage it needs. The directory must exist and the Storm daemons must
        have permission to read/write from this location.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.servers</name>
     <value>['localhost']</value>
     <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.port</name>
     <value>2181</value>
     <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.root</name>
     <value>/storm</value>
     <description>The root location at which Storm stores data in ZooKeeper.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.session.timeout</name>
     <value>20000</value>
     <description>The session timeout for clients to ZooKeeper.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.connection.timeout</name>
     <value>15000</value>
     <description>The connection timeout for clients to ZooKeeper.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.times</name>
     <value>5</value>
     <description>The number of times to retry a Zookeeper operation.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.interval</name>
     <value>1000</value>
     <description>The interval between retries of a Zookeeper operation.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.zookeeper.retry.intervalceiling.millis</name>
     <value>30000</value>
     <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.cluster.mode</name>
     <value>distributed</value>
     <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.local.mode.zmq</name>
@@ -89,47 +110,65 @@
        of this flag is to make it easy to run Storm in local mode by eliminating
        the need for native dependencies, which can be difficult to install.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.thrift.transport</name>
     <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
     <description>The transport plug-in for Thrift client/server communication.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.transport</name>
     <value>backtype.storm.messaging.netty.Context</value>
     <description>The transporter for communication among Storm tasks.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.host</name>
     <value>localhost</value>
     <description>The host that the master server is running on.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.thrift.port</name>
     <value>6627</value>
     <description> Which port the Thrift interface of Nimbus should run on. Clients should
        connect to this port to upload jars and submit topologies.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.thrift.max_buffer_size</name>
     <value>1048576</value>
     <description>The maximum buffer size thrift should use when reading messages.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.childopts</name>
     <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
     <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.task.timeout.secs</name>
     <value>30</value>
     <description>How long without heartbeating a task can go before nimbus will consider the task dead and reassign it to another location.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.supervisor.timeout.secs</name>
     <value>60</value>
     <description>How long before a supervisor can go without heartbeating before nimbus considers it dead and stops assigning new work to it.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.monitor.freq.secs</name>
@@ -139,11 +178,15 @@
        that if a machine ever goes down Nimbus will immediately wake up and take action.
        This parameter is for checking for failures when there's no explicit event like that occuring.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.cleanup.inbox.freq.secs</name>
     <value>600</value>
     <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.inbox.jar.expiration.secs</name>
@@ -155,24 +198,32 @@
        Note that the time it takes to delete an inbox jar file is going to be somewhat more than
        NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often NIMBUS_CLEANUP_FREQ_SECS is set to).
       </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.task.launch.secs</name>
     <value>120</value>
     <description>A special timeout used when a task is initially launched. During launch, this is the timeout
        used until the first heartbeat, overriding nimbus.task.timeout.secs.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.reassign</name>
     <value>true</value>
     <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
        Defaults to true, and it's not recommended to change this value.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.file.copy.expiration.secs</name>
     <value>600</value>
     <description>During upload/download with the master, how long an upload or download connection is idle
        before nimbus considers it dead and drops the connection.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nimbus.topology.validator</name>
@@ -180,51 +231,71 @@
     <description>A custom class that implements ITopologyValidator that is run whenever a
        topology is submitted. Can be used to provide business-specific logic for
        whether topologies are allowed to run or not.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>ui.port</name>
     <value>8744</value>
     <description>Storm UI binds to this port.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>ui.childopts</name>
     <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
     <description>Childopts for Storm UI Java process.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.port</name>
     <value>8000</value>
     <description>HTTP UI port for log viewer.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.childopts</name>
     <value>-Xmx128m</value>
     <description>Childopts for log viewer java process.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>logviewer.appender.name</name>
     <value>A1</value>
     <description>Appender name used by log viewer to determine log directory.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.port</name>
     <value>3772</value>
     <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.worker.threads</name>
     <value>64</value>
     <description>DRPC thrift server worker threads.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.queue.size</name>
     <value>128</value>
     <description>DRPC thrift server queue size.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.invocations.port</name>
     <value>3773</value>
     <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results back.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.request.timeout.secs</name>
@@ -232,28 +303,38 @@
     <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests can also
        timeout based on the socket timeout on the DRPC client, and separately based on the topology message
        timeout for the topology implementing the DRPC function.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>drpc.childopts</name>
     <value>-Xmx768m</value>
     <description>Childopts for Storm DRPC Java process.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.root</name>
     <value>/transactional</value>
     <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.servers</name>
     <value>null</value>
     <description>The list of zookeeper servers in which to keep the transactional state. If null (which is the default),
        storm.zookeeper.servers will be used.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>transactional.zookeeper.port</name>
     <value>null</value>
     <description>The port to use to connect to the transactional zookeeper servers. If null (which is the default),
        storm.zookeeper.port will be used.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.slots.ports</name>
@@ -261,11 +342,15 @@
     <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
        the supervisor will only run one worker per port. Use this configuration to tune
        how many workers run on each machine.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.childopts</name>
     <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
     <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.worker.start.timeout.secs</name>
@@ -274,36 +359,50 @@
        the supervisor tries to restart the worker process. This value overrides
        supervisor.worker.timeout.secs during launch because there is additional
        overhead to starting and configuring the JVM on launch.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.worker.timeout.secs</name>
     <value>30</value>
     <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker process.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.monitor.frequency.secs</name>
     <value>3</value>
     <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>supervisor.heartbeat.frequency.secs</name>
     <value>5</value>
     <description>How often the supervisor sends a heartbeat to the master.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>worker.childopts</name>
     <value>-Xmx768m -javaagent:/usr/lib/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
     <description>The jvm opts provided to workers launched by this supervisor. All "%ID%" substrings are replaced with an identifier for this worker.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>worker.heartbeat.frequency.secs</name>
     <value>1</value>
     <description>How often this worker should heartbeat to the supervisor.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>task.heartbeat.frequency.secs</name>
     <value>3</value>
     <description>How often a task should heartbeat its status to the master.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>task.refresh.poll.secs</name>
@@ -313,11 +412,15 @@
        In general though, when a reassignment happens other tasks will be notified
        almost immediately. This configuration is here just in case that notification doesn't
        come through.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>zmq.threads</name>
     <value>1</value>
     <description>The number of threads that should be used by the zeromq context in each worker process.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>zmq.linger.millis</name>
@@ -325,58 +428,80 @@
     <description>How long a connection should retry sending messages to a target host when
        the connection is closed. This is an advanced configuration and can almost
        certainly be ignored.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>zmq.hwm</name>
     <value>0</value>
     <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent buffer explosion
        on the networking layer.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.server_worker_threads</name>
     <value>1</value>
     <description>Netty based messaging: The # of worker threads for the server.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.client_worker_threads</name>
     <value>1</value>
     <description>Netty based messaging: The # of worker threads for the client.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.buffer_size</name>
     <value>5242880</value>
     <description>Netty based messaging: The buffer size for send/recv buffer.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.max_retries</name>
     <value>30</value>
     <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.max_wait_ms</name>
     <value>1000</value>
     <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>storm.messaging.netty.min_wait_ms</name>
     <value>100</value>
     <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.enable.message.timeouts</name>
     <value>true</value>
     <description>True if Storm should timeout messages or not. Defaults to true. This is meant to be used
        in unit tests to prevent tuples from being accidentally timed out during the test.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.debug</name>
     <value>false</value>
     <description>When set to true, Storm will log every message that's emitted.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.optimize</name>
     <value>true</value>
     <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where appropriate.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.workers</name>
@@ -385,6 +510,8 @@
        topology. Each process will execute some number of tasks as threads within
        them. This parameter should be used in conjunction with the parallelism hints
        on each component in the topology to tune the performance of a topology.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.acker.executors</name>
@@ -394,6 +521,8 @@
       If this is set to 0, then Storm will immediately ack tuples as soon
        as they come off the spout, effectively disabling reliability.
     </description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.message.timeout.secs</name>
@@ -402,6 +531,8 @@
        emitted by a spout. If the message is not acked within this time frame, Storm
        will fail the message on the spout. Some spouts implementations will then replay
        the message at a later time.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.skip.missing.kryo.registrations</name>
@@ -414,12 +545,16 @@
        a single application may not have the code for the other serializers used by other apps.
        By setting this config to true, Storm will ignore that it doesn't have those other serializations
        rather than throw an error.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.task.parallelism</name>
     <value>null</value>
     <description>The maximum parallelism allowed for a component in this topology. This configuration is
        typically used in testing to limit the number of threads spawned in local mode.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.spout.pending</name>
@@ -430,99 +565,135 @@
        A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
        Note that this config parameter has no effect for unreliable spouts that don't tag
        their tuples with a message id.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.state.synchronization.timeout.secs</name>
     <value>60</value>
     <description>The maximum amount of time a component gives a source of state to synchronize before it requests
        synchronization again.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.stats.sample.rate</name>
     <value>0.05</value>
     <description>The percentage of tuples to sample to produce stats for a task.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.builtin.metrics.bucket.size.secs</name>
     <value>60</value>
     <description>The time period that builtin metrics data is bucketed into.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.fall.back.on.java.serialization</name>
     <value>true</value>
     <description>Whether or not to use Java serialization in a topology.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.childopts</name>
     <value>null</value>
     <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.receive.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.send.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.receiver.buffer.size</name>
     <value>8</value>
     <description>The maximum number of messages to batch from the thread receiving off the network to the
        executor queues. Must be a power of 2.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.transfer.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor transfer queue for each worker.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.tick.tuple.freq.secs</name>
     <value>null</value>
     <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
        to tasks. Meant to be used as a component-specific configuration.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.shared.thread.pool.size</name>
     <value>4</value>
     <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
        via the TopologyContext.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.disruptor.wait.strategy</name>
     <value>com.lmax.disruptor.BlockingWaitStrategy</value>
     <description>Configure the wait strategy used for internal queuing. Can be used to tradeoff latency
        vs. throughput.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.executor.send.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.receiver.buffer.size</name>
     <value>8</value>
     <description>The maximum number of messages to batch from the thread receiving off the network to the
        executor queues. Must be a power of 2.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.transfer.buffer.size</name>
     <value>1024</value>
     <description>The size of the Disruptor transfer queue for each worker.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.tick.tuple.freq.secs</name>
     <value>null</value>
     <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
        to tasks. Meant to be used as a component-specific configuration.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.worker.shared.thread.pool.size</name>
     <value>4</value>
     <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
        via the TopologyContext.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.spout.wait.strategy</name>
@@ -532,11 +703,15 @@
 
        1. nextTuple emits no tuples
        2. The spout has hit maxSpoutPending and can't emit any more tuples</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.sleep.spout.wait.strategy.time.ms</name>
     <value>1</value>
     <description>The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.error.throttle.interval.secs</name>
@@ -544,6 +719,8 @@
     <description>The interval in seconds to use for determining whether to throttle error reported to Zookeeper. For example,
        an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
        reported to Zookeeper per task for every 10 second interval of time.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.max.error.report.per.interval</name>
@@ -551,6 +728,8 @@
     <description>The maximum number of errors per task that will be reported to Zookeeper within each
        topology.error.throttle.interval.secs window. For example, an interval of 10 seconds with this value set to 5
        will only allow 5 errors to be reported to Zookeeper per task for every 10 second interval of time.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.kryo.factory</name>
@@ -558,17 +737,23 @@
     <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
        topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
        implements topology.fall.back.on.java.serialization and turns references off.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.tuple.serializer</name>
     <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
     <description>The serializer class for ListDelegate (tuple payload).
        The default serializer will be ListDelegateSerializer</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.trident.batch.emit.interval.millis</name>
     <value>500</value>
     <description>How often a batch can be emitted in a Trident topology.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>dev.zookeeper.path</name>
@@ -576,5 +761,7 @@
     <description>The path to use as the zookeeper dir when running a zookeeper server via
        "storm dev-zookeeper". This zookeeper instance is only intended for development;
        it is not a production grade zookeeper setup.</description>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>
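
For reference, the per-property elements added throughout the hunks above declare how a property is handled when Ambari itself is upgraded (on-ambari-upgrade) versus when the stack is upgraded (on-stack-upgrade), with add/change/delete flags on each. As a rough sketch only (a hypothetical helper script, not part of Ambari or of this commit), something like the following could read a stack configuration file such as the storm-site.xml above and list the properties that an Ambari upgrade would be allowed to add, judging by the add="true" flag:

import xml.etree.ElementTree as ET

def properties_addable_on_ambari_upgrade(path):
    """Return property names whose <on-ambari-upgrade add="true"/> element
    permits them to be added during an Ambari upgrade (semantics assumed
    from the flags shown in the diff, not from Ambari's implementation)."""
    addable = []
    for prop in ET.parse(path).getroot().findall("property"):
        name = prop.findtext("name")
        flag = prop.find("on-ambari-upgrade")
        # A missing element is treated here as "do not add"; the authoritative
        # behaviour is defined by AMBARI-16272, not by this sketch.
        if flag is not None and flag.get("add") == "true":
            addable.append(name)
    return addable

if __name__ == "__main__":
    for name in properties_addable_on_ambari_upgrade("storm-site.xml"):
        print(name)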

http://git-wip-us.apache.org/repos/asf/ambari/blob/98d86419/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
index a6b06ee..0cfad93 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-env.xml
@@ -19,12 +19,13 @@
  * limitations under the License.
  */
 -->
-
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>tez_user</name>
     <display-name>Tez User</display-name>
     <value>tez</value>
-    <description></description>
+    <description/>
+    <on-ambari-upgrade add="true" change="false" delete="false"/>
+    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>
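
Since the same pair of elements is being added to every property across the stack files (the tez-env.xml hunk above being one of the smallest), a consistency check in the same spirit could flag any <property> that still lacks either element. Again, this is only an assumed, illustrative helper, not a tool shipped with this commit:

import sys
import xml.etree.ElementTree as ET

REQUIRED = ("on-ambari-upgrade", "on-stack-upgrade")

def missing_upgrade_tags(path):
    """Yield (property name, missing tag) pairs for one stack configuration file."""
    for prop in ET.parse(path).getroot().findall("property"):
        name = prop.findtext("name", default="<unnamed>")
        for tag in REQUIRED:
            if prop.find(tag) is None:
                yield name, tag

if __name__ == "__main__":
    # Assumed invocation: python check_upgrade_tags.py path/to/tez-env.xml
    for name, tag in missing_upgrade_tags(sys.argv[1]):
        print(name + ": missing <" + tag + ">")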