Posted to commits@ambari.apache.org by sr...@apache.org on 2016/06/02 02:00:16 UTC

[69/98] [abbrv] ambari git commit: Revert "AMBARI-16272. Ambari Upgrade shouldn't automatically add stack configs (dlysnichenko)" - failing testcases
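
For context: AMBARI-16272 had added per-property upgrade metadata (the <on-ambari-upgrade/> and <on-stack-upgrade/> elements) to the stack configuration XMLs, and this revert strips those elements out again. A representative before/after for a single property, assembled from the tez-site.xml hunks below (illustrative only, not an additional hunk of the commit):

    Before the revert (AMBARI-16272 applied):

    <property>
      <name>tez.am.log.level</name>
      <value>INFO</value>
      <description>Root Logging level passed to the Tez app master</description>
      <on-ambari-upgrade add="true" change="false" delete="false"/>
      <on-stack-upgrade add="true" change="false" delete="false"/>
    </property>

    After the revert:

    <property>
      <name>tez.am.log.level</name>
      <value>INFO</value>
      <description>Root Logging level passed to the Tez app master</description>
    </property>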

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
index 0f87619..3f8c368 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/TEZ/configuration/tez-site.xml
@@ -16,6 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <configuration supports_final="true" supports_do_not_extend="true">
   <property>
     <name>tez.lib.uris</name>
@@ -24,55 +25,48 @@
       Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
       If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
+
   <property>
     <name>tez.am.log.level</name>
     <value>INFO</value>
     <description>Root Logging level passed to the Tez app master</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.generate.debug.artifacts</name>
     <value>false</value>
     <description>Generate debug artifacts such as a text representation of the submitted DAG plan</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.resource.memory.mb</name>
     <value>1536</value>
     <description>The amount of memory to be used by the AppMaster.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.launch.cmd-opts</name>
     <value>-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC</value>
     <description>Java options for the Tez AppMaster process. The Xmx value is derived based on tez.am.resource.memory.mb and is 80% of the value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for the Tez AppMaster process. These will be prepended to the properties specified via tez.am.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.launch.env</name>
     <value>LD_LIBRARY_PATH=C:\hdp\hadoop\bin;C:\hdp\hadoop\share\hadoop\common\lib</value>
@@ -81,34 +75,30 @@
         you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.resource.memory.mb</name>
     <value>1536</value>
     <description>The amount of memory to be used by launched tasks.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.launch.cmd-opts</name>
     <value>-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC</value>
     <description>Java options for tasks. The Xmx value is derived based on tez.task.resource.memory.mb and is 80% of this value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for tasks. These will be prepended to the properties specified via tez.task.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.launch.env</name>
     <value>LD_LIBRARY_PATH=C:\hdp\hadoop\bin;C:\hdp\hadoop\share\hadoop\common\lib</value>
@@ -117,18 +107,16 @@
       you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.shuffle-vertex-manager.min-src-fraction</name>
     <value>0.2</value>
     <description>In case of a ScatterGather connection, the fraction of source tasks which should
       complete before tasks for the current vertex are schedule
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.shuffle-vertex-manager.max-src-fraction</name>
     <value>0.4</value>
@@ -136,16 +124,14 @@
       completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
       scheduling on the current vertex scales linearly between min-fraction and max-fraction
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
     <value>250</value>
     <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.grouping.split-waves</name>
     <value>1.7</value>
@@ -153,177 +139,159 @@
       a Vertex. 1.7 with 100% queue available implies generating a number of tasks roughly equal
       to 170% of the available containers on the queue
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.grouping.min-size</name>
     <value>16777216</value>
     <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
       too many splits
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.grouping.max-size</name>
     <value>1073741824</value>
     <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
       excessively large split
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.enabled</name>
     <value>true</value>
     <description>Configuration to specify whether container should be reused</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.rack-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.non-local-fallback.enabled</name>
     <value>false</value>
     <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.idle.release-timeout-min.millis</name>
     <value>10000</value>
     <description>The minimum amount of time to hold on to a container that is idle. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.idle.release-timeout-max.millis</name>
     <value>20000</value>
     <description>The maximum amount of time to hold on to a container if no task can be assigned to it immediately. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
     <value>250</value>
     <description>The amount of time to wait before assigning a container to the next level of
-      locality. NODE -&gt; RACK -&gt; NON_LOCAL
+      locality. NODE -> RACK -> NON_LOCAL
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.max.app.attempts</name>
     <value>2</value>
     <description>Specifies the total number of time the app master will run in case recovery is triggered</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.maxtaskfailures.per.node</name>
     <value>10</value>
     <description>The maximum number of allowed task attempt failures on a node before
       it gets marked as blacklisted
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.am.heartbeat.counter.interval-ms.max</name>
     <value>4000</value>
     <description>Time interval at which task counters are sent to the AM</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.get-task.sleep.interval-ms.max</name>
     <value>200</value>
     <description>The maximum amount of time, in seconds, to wait before a task asks an AM for
       another task
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.max-events-per-heartbeat</name>
     <value>500</value>
     <description>Maximum number of of events to fetch from the AM by the tasks in a single heartbeat.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.session.client.timeout.secs</name>
     <value>-1</value>
     <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
       the client
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.session.am.dag.submit.timeout.secs</name>
     <value>300</value>
     <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
       before shutting down
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.counters.max</name>
     <value>2000</value>
     <description>The number of allowed counters for the executing DAG</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.counters.max.groups</name>
     <value>1000</value>
     <description>The number of allowed counter groups for the executing DAG</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
+
   <!-- Configuration for runtime components -->
+
   <!-- These properties can be set on a per edge basis by configuring the payload for each
        edge independently. -->
+
+
   <property>
     <name>tez.runtime.compress</name>
     <value>true</value>
     <description>Whether intermediate data should be compressed or not</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.compress.codec</name>
     <value>org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>The coded to be used if compressing intermediate data. Only
       applicable if tez.runtime.compress is enabled
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.io.sort.mb</name>
     <value>512</value>
     <description>The size of the sort buffer when output needs to be sorted</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.unordered.output.buffer.size-mb</name>
     <value>100</value>
     <description>The size of the buffer when output does not require to be sorted</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.history.logging.service.class</name>
     <value>org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService</value>
@@ -331,7 +299,6 @@
       Set to org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService to log to ATS
       Set to org.apache.tez.dag.history.logging.impl.SimpleHistoryLoggingService to log to the filesystem specified by ${fs.defaultFS}
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
index 5acc094..8a1cc24 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration-mapred/mapred-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,16 +17,18 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
   <property>
     <name>mapreduce.admin.user.env</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.application.classpath</name>
     <value>
@@ -34,74 +37,64 @@
     <description>
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH entries.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.application.framework.path</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <deleted>true</deleted>
     <value/>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
     <value>1</value>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
     <value>30000</value>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.job.emit-timeline-data</name>
     <value>false</value>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.jobhistory.bind-host</name>
     <value>0.0.0.0</value>
     <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
index fd0764e..4c215b7 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/capacity-scheduler.xml
@@ -1,4 +1,3 @@
-<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -15,6 +14,7 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
@@ -34,35 +34,26 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
     <value>*</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
     <value>-1</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
     <value>-1</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <value> </value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
index 51cc346..2184c0e 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-env.xml
@@ -19,6 +19,7 @@
  * limitations under the License.
  */
 -->
+
 <configuration supports_adding_forbidden="true">
   <property>
     <name>yarn_cgroups_enabled</name>
@@ -39,7 +40,5 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
index c45ab3f..a3cf59a 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.2/services/YARN/configuration/yarn-site.xml
@@ -16,14 +16,15 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
   <property>
     <name>yarn.application.classpath</name>
     <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hadoop.registry.rm.enabled</name>
@@ -31,8 +32,6 @@
     <description>
       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
@@ -40,15 +39,11 @@
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
     <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.dir</name>
@@ -57,22 +52,16 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
     <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.recovery.enabled</name>
@@ -81,8 +70,6 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
@@ -94,8 +81,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -106,8 +91,6 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
@@ -115,43 +98,31 @@
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda </value>
     <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
     <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
     <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
     <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
@@ -161,83 +132,61 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
     <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
     <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
     <description>The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance.Typically,  a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
     <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
     <description>RI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
     <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
     <description>Pre-requisite to use CGroups</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>hadoop-yarn</value>
     <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
     <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
     <value>8</value>
-    <description/>
+    <description></description>
     <display-name>Number of virtual cores</display-name>
     <value-attributes>
       <type>int</type>
@@ -250,8 +199,6 @@
         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
@@ -264,43 +211,31 @@
       <maximum>100</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.node-labels.manager-class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value>
     <description>If user want to enable this feature, specify it to "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
     <value>-1</value>
     <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded whenthe application is finished. By setting this configure, logs can be uploaded periodically when the application is running. The minimum rolling-interval-seconds can be set is 3600.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
@@ -309,43 +244,31 @@
       This configuration is for debug and test purpose.
       By setting this configuration as true.
       We can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
     <description>This is temporary solution. The configuration will be deleted once, we find a more scalable method to only write a single log file per LRS.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
     <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
@@ -353,15 +276,11 @@
     <description>
       Enable age off of timeline store data.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
     <value>/hadoop/yarn/timeline</value>
     <description>Store file name for leveldb timeline store.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
@@ -369,8 +288,6 @@
     <description>
       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
@@ -378,8 +295,6 @@
     <description>
       Size of cache for recently read entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
@@ -387,8 +302,6 @@
     <description>
       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.type</name>
@@ -397,15 +310,11 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
@@ -416,41 +325,31 @@
       tokens(fallback to kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-vcores</name>
     <value>1</value>
-    <description/>
+    <description></description>
     <display-name>Minimum Container Size (VCores)</display-name>
     <value-attributes>
       <type>int</type>
@@ -464,13 +363,11 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-vcores</name>
     <value>8</value>
-    <description/>
+    <description></description>
     <display-name>Maximum Container Size (VCores)</display-name>
     <value-attributes>
       <type>int</type>
@@ -484,8 +381,6 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.node-labels.enabled</name>
@@ -495,6 +390,9 @@
     </description>
     <display-name>Node Labels</display-name>
     <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <value-attributes>
       <type>value-list</type>
       <entries>
         <entry>
@@ -508,8 +406,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
@@ -529,7 +425,5 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
index b90fa3c..409cd5a 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/FALCON/configuration/falcon-startup.properties.xml
@@ -1,4 +1,3 @@
-<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
 /**
@@ -20,11 +19,11 @@
  */
 -->
 <configuration supports_final="false">
+
   <property>
     <name>*.shared.libs</name>
     <value>activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
index 89cb27e..77d8fa4 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HBASE/configuration/hbase-site.xml
@@ -28,42 +28,30 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hbase.master.info.port</name>
     <value>60010</value>
     <description>The port for the HBase Master web UI.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hbase.regionserver.port</name>
     <value>16020</value>
     <description>The port the HBase RegionServer binds to.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
     <value>16030</value>
     <description>The port for the HBase RegionServer web UI.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.upperLimit</name>
     <value>0.4</value>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.lowerLimit</name>
     <value>0.38</value>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
index 34b3b2c..ff28e3e 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HDFS/configuration/hdfs-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,8 +17,11 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
+
 <configuration supports_final="true">
+
   <property>
     <name>nfs.file.dump.dir</name>
     <value>/tmp/.hdfs-nfs</value>
@@ -28,14 +32,12 @@
       they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
       One needs to make sure the directory has enough space.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>nfs.exports.allowed.hosts</name>
     <value>* rw</value>
     <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
index a6d66bb..2be3547 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/HIVE/configuration/webhcat-site.xml
@@ -16,15 +16,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
+
 <!-- The default settings for Templeton. -->
 <!-- Edit templeton-site.xml to change settings for your local -->
 <!-- install. -->
+
 <configuration supports_final="true">
+
   <property>
     <name>templeton.libjars</name>
     <value>file:///c:/hdp/hive/lib/zookeeper.jar,file:///c:/hdp/hive/lib/hive-common.jar</value>
     <description>Jars to add to the classpath.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
index 1400266..b0195bc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-env.xml
@@ -19,12 +19,11 @@
  * limitations under the License.
  */
 -->
+
 <configuration supports_adding_forbidden="true">
   <property>
     <name>oozie_user</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie_database</name>
@@ -34,8 +33,6 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie_data_dir</name>
@@ -46,8 +43,6 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie_log_dir</name>
@@ -58,8 +53,6 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie_pid_dir</name>
@@ -70,9 +63,8 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- oozie-env.cmd -->
   <property>
     <name>content</name>
@@ -147,7 +139,6 @@ set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>
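
Several properties restored above carry value-attributes such as overridable=false and editable-only-at-install=true, which tell the Ambari UI how a value may be changed (for example, oozie_data_dir is meant to be set once at install time and not overridden per config group). The following is a conceptual sketch of that gating logic, not Ambari's actual implementation; the class and method names are hypothetical.

// Conceptual sketch of how value-attributes like those above could gate editing; not Ambari code.
public class ValueAttributesSketch {

  static final class ValueAttributes {
    final boolean overridable;           // may the value differ per config group?
    final boolean editableOnlyAtInstall; // may the value change after the cluster is installed?

    ValueAttributes(boolean overridable, boolean editableOnlyAtInstall) {
      this.overridable = overridable;
      this.editableOnlyAtInstall = editableOnlyAtInstall;
    }
  }

  static boolean isEditable(ValueAttributes attrs, boolean clusterInstalled, boolean inConfigGroup) {
    if (inConfigGroup && !attrs.overridable) {
      return false; // cannot be overridden in a config group
    }
    if (clusterInstalled && attrs.editableOnlyAtInstall) {
      return false; // locked after installation
    }
    return true;
  }

  public static void main(String[] args) {
    // Mirrors the oozie_data_dir attributes above: overridable=false, editable-only-at-install=true.
    ValueAttributes oozieDataDir = new ValueAttributes(false, true);
    System.out.println(isEditable(oozieDataDir, true, false)); // false once the cluster is installed
  }
}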

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
index b0123c1..46025ab 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/OOZIE/configuration/oozie-site.xml
@@ -16,6 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <configuration supports_final="true">
   <property>
     <name>oozie.service.JPAService.jdbc.url</name>
@@ -37,9 +38,8 @@
         <name>oozie.db.schema.name</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.JPAService.create.db.schema</name>
     <value>true</value>
@@ -49,9 +49,8 @@
       If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
       If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.JPAService.jdbc.driver</name>
     <value>org.apache.derby.jdbc.EmbeddedDriver</value>
@@ -68,9 +67,8 @@
         <name>oozie_database</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
     <value>*=c:\hdp\hadoop\etc\hadoop</value>
@@ -82,9 +80,8 @@
       the Oozie configuration directory; though the path can be absolute (i.e. to point
       to Hadoop client conf/ directories in the local filesystem.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.coord.check.maximum.frequency</name>
     <value>false</value>
@@ -93,8 +90,6 @@
       this check or submit coordinators with frequencies faster than 5 minutes: doing so can cause unintended behavior and
       additional system stress.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>oozie.services</name>
@@ -138,19 +133,15 @@
       All services to be created and managed by Oozie Services singleton.
       Class names must be separated by commas.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.SchemaService.wf.ext.schemas</name>
     <value>shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.AuthorizationService.security.enabled</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>
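
The oozie.service.HadoopAccessorService.hadoop.configurations description above explains the AUTHORITY=HADOOP_CONF_DIR format with "*" acting as a wildcard for all NameNode/JobTracker endpoints. The sketch below illustrates that lookup order (exact authority first, then the wildcard); it is an illustration of the format only, not Oozie's implementation, and the sample authority in main() is made up.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the AUTHORITY=HADOOP_CONF_DIR lookup described above; not Oozie code.
public class HadoopConfLookup {

  // Parses "AUTHORITY=DIR,AUTHORITY=DIR,..." where AUTHORITY may be "*" as a wildcard.
  static Map<String, String> parse(String value) {
    Map<String, String> mappings = new LinkedHashMap<>();
    for (String pair : value.split(",")) {
      int eq = pair.indexOf('=');
      if (eq > 0) {
        mappings.put(pair.substring(0, eq).trim(), pair.substring(eq + 1).trim());
      }
    }
    return mappings;
  }

  // Exact HOST:PORT match wins; otherwise fall back to the "*" entry (null if neither exists).
  static String confDirFor(Map<String, String> mappings, String authority) {
    String dir = mappings.get(authority);
    return dir != null ? dir : mappings.get("*");
  }

  public static void main(String[] args) {
    Map<String, String> m = parse("*=c:\\hdp\\hadoop\\etc\\hadoop");
    System.out.println(confDirFor(m, "namenode.example.com:8020")); // c:\hdp\hadoop\etc\hadoop
  }
}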

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
index c82eddb..9d39e67 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/STORM/configuration/storm-site.xml
@@ -17,7 +17,8 @@
  * limitations under the License.
  */
 -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
   <property>
     <name>nimbus.seeds</name>
     <value>localhost</value>
@@ -26,43 +27,31 @@
       <editable-only-at-install>true</editable-only-at-install>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.min.replication.count.default</name>
     <value>1</value>
     <description>Default minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>topology.min.replication.count</name>
     <value>{{actual_topology_min_replication_count}}</value>
     <description>Calculated minimum number of nimbus hosts where the code must be replicated before leader nimbus can mark the topology as active and create assignments. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>topology.max.replication.wait.time.sec.default</name>
-    <value>60</value>
-    <description>Default maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
+  </property>
   <property>
-    <name>topology.max.replication.wait.time.sec</name>
-    <value>{{actual_topology_max_replication_wait_time_sec}}</value>
-    <description>Calculated maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <name>topology.max.replication.wait.time.sec.default</name>
+    <value>60</value>
+    <description>Default maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
   </property>
   <property>
+    <name>topology.max.replication.wait.time.sec</name>
+    <value>{{actual_topology_max_replication_wait_time_sec}}</value>
+    <description>Calculated maximum wait time for the nimbus host replication to achieve the nimbus.min.replication.count. Once this time is elapsed nimbus will go ahead and perform topology activation tasks even if required nimbus.min.replication.count is not achieved</description>
+  </property>
+  <property>
     <name>nimbus.host</name>
     <value>localhost</value>
     <description>Deprecated config in favor of nimbus.seeds used during non HA mode.</description>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>
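
The topology.min.replication.count and topology.max.replication.wait.time.sec descriptions above state that nimbus waits until the topology code is replicated to the required number of hosts, or until the wait time elapses, and then activates the topology either way. The sketch below is a conceptual rendering of that gate, not Storm's implementation.

import java.util.concurrent.TimeUnit;
import java.util.function.IntSupplier;

// Conceptual sketch of the activation gate described by the two replication settings above; not Storm code.
public class ReplicationWaitSketch {

  // Waits until the replica count reaches minReplication or maxWaitSec elapses;
  // per the descriptions above, activation proceeds in either case.
  static boolean waitForReplication(IntSupplier currentReplicas, int minReplication, long maxWaitSec)
      throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(maxWaitSec);
    while (currentReplicas.getAsInt() < minReplication) {
      if (System.nanoTime() >= deadline) {
        return false; // timed out; the topology is still activated, just under-replicated
      }
      TimeUnit.MILLISECONDS.sleep(100);
    }
    return true; // required replication reached before the deadline
  }

  public static void main(String[] args) throws InterruptedException {
    // With the defaults restored above: minimum replication 1, wait up to 60 seconds.
    boolean replicated = waitForReplication(() -> 1, 1, 60);
    System.out.println("replication satisfied before timeout: " + replicated);
  }
}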

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
index efdad35..6d66a9c 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/TEZ/configuration/tez-site.xml
@@ -16,6 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <configuration supports_final="true">
   <property>
     <name>tez.am.view-acls</name>
@@ -26,15 +27,13 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>C:\hdp\hadoop\share\hadoop\common\lib\hadoop-lzo-0.4.19.{{hdp_stack_version}}.jar</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.lib.uris</name>
     <value>file:///C:/hdp/tez-0.7.0.{{hdp_stack_version}}/tez-0.7.0.{{hdp_stack_version}}.tar.gz</value>
@@ -42,9 +41,8 @@
       Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
       If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.optimize.local.fetch</name>
     <value>true</value>
@@ -52,9 +50,8 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.generate.counters.per.io</name>
     <value>true</value>
@@ -62,9 +59,8 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.sorter.class</name>
     <value>PIPELINED</value>
@@ -83,9 +79,8 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.pipelined.sorter.sort.threads</name>
     <value>2</value>
@@ -99,9 +94,8 @@
         <name>tez.runtime.sorter.class</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.io.sort.mb</name>
     <value>272</value>
@@ -116,7 +110,5 @@
         <name>tez.runtime.sorter.class</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>
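
The tez.lib.uris description above distinguishes two cases: a single .tar.gz/.tgz entry is treated as a compressed bundle of the Tez libs (unpacked into a tezlibs directory, with tezlibs/ and tezlibs/lib/ added to the classpath), while multiple entries are localized as regular files. The sketch below only classifies a value into those two cases as the description defines them; it is not Tez's localization code, and the archive path in main() is just a sample.

// Illustrative sketch of the tez.lib.uris interpretation described above; not Tez code.
public class TezLibUrisSketch {

  enum Mode { SINGLE_ARCHIVE, PLAIN_FILES }

  // A single .tar.gz/.tgz entry is treated as a compressed lib bundle; anything else as plain files.
  static Mode classify(String tezLibUris) {
    String[] entries = tezLibUris.split(",");
    if (entries.length == 1) {
      String uri = entries[0].trim().toLowerCase();
      if (uri.endsWith(".tar.gz") || uri.endsWith(".tgz")) {
        return Mode.SINGLE_ARCHIVE; // unpacked into tezlibs/, with tezlibs/ and tezlibs/lib/ on the classpath
      }
    }
    return Mode.PLAIN_FILES; // each file localized as-is, directories localized non-recursively
  }

  public static void main(String[] args) {
    System.out.println(classify("file:///C:/hdp/tez-0.7.0/tez-0.7.0.tar.gz")); // SINGLE_ARCHIVE
  }
}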

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
index 0501cb2..95ae583 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.3/services/YARN/configuration/capacity-scheduler.xml
@@ -1,4 +1,3 @@
-<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -15,23 +14,22 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <configuration supports_final="false" supports_adding_forbidden="true">
+
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
index 61a1277..179f578 100644
--- a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/core-site.xml
@@ -1,6 +1,7 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
+
+ <!--
     Licensed to the Apache Software Foundation (ASF) under one or more
     contributor license agreements.  See the NOTICE file distributed with
     this work for additional information regarding copyright ownership.
@@ -16,18 +17,20 @@
     See the License for the specific language governing permissions and
     limitations under the License.
  -->
+ 
 <!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
   <property>
     <name>hadoop.http.authentication.simple.anonymous.allowed</name>
     <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
+
   <property>
     <name>hadoop.security.key.provider.path</name>
-    <value/>
+    <value></value>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -49,26 +52,22 @@
         <name>ranger.service.https.attrib.ssl.enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- HDFS properties required for HAWQ -->
   <property>
     <name>ipc.client.connection.maxidletime</name>
     <value>3600000</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>ipc.client.connect.timeout</name>
     <value>300000</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>ipc.server.listen.queue.size</name>
     <value>3300</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
index 273c9c2..ea84640 100644
--- a/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0/services/HDFS/configuration/hdfs-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,39 +17,38 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
+
 <configuration supports_final="true">
+
   <property>
     <name>dfs.namenode.startup.delay.block.deletion.sec</name>
     <value>3600</value>
-    <description/>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+    <description></description>
   </property>
+
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/hadoop/hdfs/journalnode</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+  
   <property>
     <name>dfs.client.retry.policy.enabled</name>
     <value>false</value>
     <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.content-summary.limit</name>
     <value>5000</value>
     <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.encryption.key.provider.uri</name>
-    <value/>
+    <value></value>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -70,50 +70,42 @@
         <name>ranger.service.https.attrib.ssl.enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- HDFS properties required for HAWQ -->
   <property>
     <name>dfs.allow.truncate</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.client.read.shortcircuit</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.block.local-path-access.user</name>
     <value>gpadmin</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.datanode.handler.count</name>
     <value>60</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>-1</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index bc99117..c052a6c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -124,7 +124,7 @@ public class UpgradeResourceProviderHDP22Test {
         "placeholder-rendered-properly").anyTimes();
 
     expect(configHelper.getDefaultProperties(EasyMock.anyObject(StackId.class), EasyMock.anyObject(Cluster.class))).andReturn(
-        new HashMap<String, Set<org.apache.ambari.server.state.PropertyInfo>>()).anyTimes();
+        new HashMap<String, Map<String, String>>()).anyTimes();
 
     expect(configHelper.getEffectiveConfigAttributes(EasyMock.anyObject(Cluster.class), EasyMock.anyObject(Map.class))).andReturn(
         new HashMap<String, Map<String, Map<String, String>>>()).anyTimes();
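
The hunk above changes the stubbed return value of configHelper.getDefaultProperties back to a map of config type to property-name/value pairs, matching the reverted ConfigHelper signature. Below is a minimal EasyMock sketch of stubbing a method with that return shape; StackDefaults is a hypothetical interface used only for illustration (not Ambari's ConfigHelper), and EasyMock is assumed to be on the classpath.

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;

import java.util.HashMap;
import java.util.Map;

// Minimal EasyMock sketch of stubbing a default-properties lookup keyed by config type.
public class DefaultPropertiesStubSketch {

  public interface StackDefaults {
    Map<String, Map<String, String>> getDefaultProperties(String stackId);
  }

  public static void main(String[] args) {
    StackDefaults defaults = createMock(StackDefaults.class);
    // Mirrors the reverted return shape: config type -> (property name -> default value).
    expect(defaults.getDefaultProperties("HDP-2.2"))
        .andReturn(new HashMap<String, Map<String, String>>())
        .anyTimes();
    replay(defaults);

    System.out.println(defaults.getDefaultProperties("HDP-2.2").isEmpty()); // true
  }
}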