Posted to commits@ambari.apache.org by sr...@apache.org on 2016/06/02 02:00:28 UTC

[81/98] [abbrv] ambari git commit: Revert "AMBARI-16272. Ambari Upgrade shouldn't automatically add stack configs (dlysnichenko)" - failing testcases
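
For context, the reverted AMBARI-16272 change had added per-property upgrade directives to the stack configuration XML, and this revert strips those child elements again. As an illustrative sketch only (taken from one of the tez-site.xml hunks below, not an addition made by this commit), a stack property carrying the directives looks like this before the revert; afterwards only the name/value/description remain:

    <property>
      <name>tez.am.log.level</name>
      <value>INFO</value>
      <description>Root Logging level passed to the Tez app master</description>
      <!-- directives added by AMBARI-16272 and removed by this revert -->
      <on-ambari-upgrade add="true" change="false" delete="false"/>
      <on-stack-upgrade add="true" change="false" delete="false"/>
    </property>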

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
index dc9804f..0835ab8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/TEZ/configuration/tez-site.xml
@@ -16,52 +16,48 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <configuration supports_final="true">
+
   <property>
     <name>tez.lib.uris</name>
     <value>glusterfs:///apps/tez/,glusterfs:///apps/tez/lib/</value>
     <description>The location of the Tez libraries which will be localized for DAGs</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.log.level</name>
     <value>INFO</value>
     <description>Root Logging level passed to the Tez app master</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.staging-dir</name>
     <value>/tmp/${user.name}/staging</value>
     <description>The staging dir used while submitting DAGs</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.resource.memory.mb</name>
     <value>1536</value>
     <description>The amount of memory to be used by the AppMaster</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- tez picks the java opts from yarn.app.mapreduce.am.command-opts for MR tasks. Likewise for the AM memory MB -->
   <property>
     <name>tez.am.java.opts</name>
     <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC</value>
     <description>Java options for the Tez AppMaster process. The -Xmx parameter value is generally 0.8 times tez.am.resource.memory.mb config.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.shuffle-vertex-manager.min-src-fraction</name>
     <value>0.2</value>
     <description>In case of a ScatterGather connection, the fraction of source tasks which should
       complete before tasks for the current vertex are schedule
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.shuffle-vertex-manager.max-src-fraction</name>
     <value>0.4</value>
@@ -69,16 +65,14 @@
       completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
       scheduling on the current vertex scales linearly between min-fraction and max-fraction
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
     <value>250</value>
     <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.grouping.split-waves</name>
     <value>1.4</value>
@@ -86,50 +80,44 @@
       a Vertex. 1.4 with 100% queue available implies generating a number of tasks roughly equal
       to 140% of the available containers on the queue
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.grouping.min-size</name>
     <value>16777216</value>
     <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
       too many splits
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.grouping.max-size</name>
     <value>1073741824</value>
     <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
       excessively large split
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.enabled</name>
     <value>true</value>
     <description>Configuration to specify whether container should be reused</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.rack-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.non-local-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.session.delay-allocation-millis</name>
     <value>10000</value>
@@ -138,27 +126,24 @@
       it immediately. Only active when reuse is enabled. Set to -1 to never release a container
       in a session
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
     <value>250</value>
     <description>The amount of time to wait before assigning a container to the next level of
-      locality. NODE -&gt; RACK -&gt; NON_LOCAL
+      locality. NODE -> RACK -> NON_LOCAL
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.task.get-task.sleep.interval-ms.max</name>
     <value>200</value>
     <description>The maximum amount of time, in seconds, to wait before a task asks an AM for
       another task
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.am.env</name>
     <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
@@ -166,9 +151,8 @@
         Additional execution environment entries for tez. This is not an additive property. You must preserve the original value if
         you want to have access to native libraries.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- Client Submission timeout value when submitting DAGs to a session -->
   <property>
     <name>tez.session.client.timeout.secs</name>
@@ -176,58 +160,56 @@
     <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
       the client
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.session.am.dag.submit.timeout.secs</name>
     <value>300</value>
     <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
       before shutting down
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
+
   <!-- Configuration for runtime components -->
+
   <!-- These properties can be set on a per edge basis by configuring the payload for each
        edge independently. -->
+
   <property>
     <name>tez.runtime.intermediate-output.should-compress</name>
     <value>false</value>
     <description>Whether intermediate output should be compressed or not</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.intermediate-output.compress.codec</name>
     <value>org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>The coded to be used if compressing intermediate output. Only
       applicable if tez.runtime.intermediate-output.should-compress is enabled.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.intermediate-input.is-compressed</name>
     <value>false</value>
     <description>Whether intermediate input is compressed</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>tez.runtime.intermediate-input.compress.codec</name>
     <value>org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>The coded to be used when reading intermediate compressed input.
     Only applicable if tez.runtime.intermediate-input.is-compressed is enabled.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- Configuration for ATS integration -->
+
   <property>
     <name>tez.yarn.ats.enabled</name>
     <value>true</value>
     <description>Whether to send history events to YARN Application Timeline Server</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
index 8ba5894..671f328 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,82 +17,72 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-  <!-- GLUSTERFS properties -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
     <value>glusterfs:///mr-history/tmp</value>
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
     <value>glusterfs:///mr-history/done</value>
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>glusterfs:///user</value>
-    <description>
+     <name>yarn.app.mapreduce.am.staging-dir</name>
+     <value>glusterfs:///user</value>
+     <description>
        The staging dir used while submitting jobs.
      </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>glusterfs:///mapred/jobstatus</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>glusterfs:///mapred/history/done</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>mapred.system.dir</name>
-    <value>glusterfs:///mapred/system</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>glusterfs:///user</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>glusterfs:///mapred/jobstatus</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
+     <name>mapred.healthChecker.script.path</name>
+     <value>glusterfs:///mapred/jobstatus</value>
+   </property>
   <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>glusterfs:///mapred/history/done</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
+     <name>mapred.job.tracker.history.completed.location</name>
+     <value>glusterfs:///mapred/history/done</value>
   </property>
+
   <property>
     <name>mapred.system.dir</name>
     <value>glusterfs:///mapred/system</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>mapreduce.jobtracker.staging.root.dir</name>
     <value>glusterfs:///user</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
+<property>
+<name>mapred.healthChecker.script.path</name>
+<value>glusterfs:///mapred/jobstatus</value>
+</property>
+
+<property>
+<name>mapred.job.tracker.history.completed.location</name>
+<value>glusterfs:///mapred/history/done</value>
+</property>
+
+<property>
+<name>mapred.system.dir</name>
+<value>glusterfs:///mapred/system</value>
+</property>
+
+<property>
+<name>mapreduce.jobtracker.staging.root.dir</name>
+<value>glusterfs:///user</value>
+</property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
index 0899851..809d5c5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,61 +18,47 @@
    limitations under the License.
 -->
 <configuration>
-  <property>
-    <name>ssl.client.truststore.location</name>
-    <value>/etc/security/clientKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.location</name>
-    <value>/etc/security/clientKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
+    <property>
+        <name>ssl.client.truststore.location</name>
+        <value>/etc/security/clientKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>ssl.client.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.location</name>
+        <value>/etc/security/clientKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+    </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
index 4f2b8ea..32199c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,72 +18,56 @@
    limitations under the License.
 -->
 <configuration>
-  <property>
-    <name>ssl.server.truststore.location</name>
-    <value>/etc/security/serverKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.location</name>
-    <value>/etc/security/serverKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.keypassword</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password for private key in keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
-  </property>
+    <property>
+        <name>ssl.server.truststore.location</name>
+        <value>/etc/security/serverKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>ssl.server.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.location</name>
+        <value>/etc/security/serverKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>ssl.server.keystore.keypassword</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password for private key in keystore file.</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+    </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
index 29cd3c4..cf8242b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
@@ -1,4 +1,3 @@
-<?xml version="1.0"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -15,16 +14,17 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <configuration supports_final="false">
+
   <property>
     <name>yarn.scheduler.capacity.maximum-applications</name>
     <value>10000</value>
     <description>
       Maximum number of applications that can be pending and running.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
     <value>0.2</value>
@@ -33,18 +33,16 @@
       application masters i.e. controls number of concurrent running
       applications.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
     <value>default</value>
     <description>
       The queues at the this level (root is the root queue).
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.capacity</name>
     <value>100</value>
@@ -54,61 +52,54 @@
       The child queues capacity should add up to their parent queue's capacity
       or less.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
     <value>1</value>
     <description>
       Default queue user limit a percentage from 0.0 to 1.0.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
     <value>100</value>
     <description>
       The maximum capacity of the default queue. 
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.state</name>
     <value>RUNNING</value>
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
     <value>*</value>
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
     <value>*</value>
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
     <value>*</value>
@@ -116,9 +107,8 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+  
   <property>
     <name>yarn.scheduler.capacity.node-locality-delay</name>
     <value>40</value>
@@ -128,16 +118,14 @@
       Typically this should be set to number of nodes in the cluster, By default is setting
       approximately number of nodes in one rack which is 40.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
     <value>100</value>
     <description>
       Default minimum queue resource limit depends on the number of users who have submitted applications.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
index dc2eb49..ed6f959 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-env.xml
@@ -19,63 +19,48 @@
  * limitations under the License.
  */
 -->
+
 <configuration supports_final="false" supports_adding_forbidden="true">
   <property>
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn_user</name>
     <display-name>Yarn User</display-name>
     <value>yarn</value>
     <description>YARN User</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>apptimelineserver_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NameNode using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <!-- yarn-env.sh -->
   <property>
@@ -197,7 +182,5 @@ YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
index 9ad8f22..893ccd8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -16,30 +16,31 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
   <!-- ResourceManager -->
+
   <property>
     <name>yarn.resourcemanager.hostname</name>
     <value>localhost</value>
     <description>The hostname of the RM.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
     <description> The address of ResourceManager. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.scheduler.address</name>
     <value>localhost:8030</value>
     <description>The address of the scheduler interface.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.address</name>
     <value>localhost:8050</value>
@@ -47,23 +48,20 @@
       The address of the applications manager interface in the
       RM.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.admin.address</name>
     <value>localhost:8141</value>
     <description>The address of the RM admin interface.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
     <description>The class to use as the resource scheduler.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.minimum-allocation-mb</name>
     <value>512</value>
@@ -72,9 +70,8 @@
       in MBs. Memory requests lower than this won't take effect,
       and the specified value will get allocated at minimum.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.scheduler.maximum-allocation-mb</name>
     <value>2048</value>
@@ -83,46 +80,41 @@
       in MBs. Memory requests higher than this won't take effect,
       and will get capped to this value.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.acl.enable</name>
     <value>false</value>
     <description> Are acls enabled. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.admin.acl</name>
-    <value/>
+    <value></value>
     <description> ACL of who can be admin of the YARN cluster. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!-- NodeManager -->
+
   <property>
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.resource.memory-mb</name>
     <value>5120</value>
     <description>Amount of physical memory, in MB, that can be allocated
       for containers.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.application.classpath</name>
     <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.vmem-pmem-ratio</name>
     <value>2.1</value>
@@ -131,38 +123,33 @@
       expressed in terms of physical memory, and virtual memory usage
       is allowed to exceed this allocation by this ratio.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor</value>
     <description>ContainerExecutor for launching containers</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
     <value>hadoop</value>
     <description>Unix group of the NodeManager</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.aux-services</name>
     <value>mapreduce_shuffle</value>
     <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
       not start with numbers</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
     <description>The auxiliary service class to use </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.log-dirs</name>
     <value>/hadoop/yarn/log</value>
@@ -173,9 +160,8 @@
       named container_{$contid}. Each container directory will contain the files
       stderr, stdin, and syslog generated by that container.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.local-dirs</name>
     <value>/hadoop/yarn/local</value>
@@ -186,9 +172,8 @@
       Individual containers' work directories, called container_${contid}, will
       be subdirectories of this.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.container-monitor.interval-ms</name>
     <value>3000</value>
@@ -196,9 +181,8 @@
       The interval, in milliseconds, for which the node manager
       waits  between two cycles of monitoring its containers' memory usage.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <!--
   <property>
     <name>yarn.nodemanager.health-checker.script.path</name>
@@ -206,20 +190,19 @@
     <description>The health check script to run.</description>
   </property>
    -->
+
   <property>
     <name>yarn.nodemanager.health-checker.interval-ms</name>
     <value>135000</value>
     <description>Frequency of running node health script.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
     <value>60000</value>
     <description>Script time out period.</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.log.retain-second</name>
     <value>604800</value>
@@ -227,23 +210,20 @@
       Time in seconds to retain user logs. Only applicable if
       log aggregation is disabled.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.log-aggregation-enable</name>
     <value>true</value>
     <description>Whether to enable log aggregation. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
     <value>logs</value>
@@ -251,18 +231,16 @@
       The remote log dir will be created at
       {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
     <value>gz</value>
     <description>
       T-file compression types used to compress aggregated logs.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.delete.debug-delay-sec</name>
     <value>0</value>
@@ -281,9 +259,8 @@
       of the Yarn applications' log directories is configurable with the
       yarn.nodemanager.log-dirs property (see also below).
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.log-aggregation.retain-seconds</name>
     <value>2592000</value>
@@ -291,9 +268,8 @@
       How long to keep aggregation logs before deleting them. -1 disables.
       Be careful set this too small and you will spam the name node.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.admin-env</name>
     <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
@@ -301,9 +277,8 @@
       Environment variables that should be forwarded from the NodeManager's
       environment to the container's.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
     <value>0.25</value>
@@ -314,9 +289,8 @@
       If there are less number of healthy local-dirs (or log-dirs) available,
       then new containers will not be launched on this node.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.am.max-attempts</name>
     <value>2</value>
@@ -328,36 +302,32 @@
       the resourcemanager will override it. The default number is set to 2, to
       allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.webapp.address</name>
     <value>localhost:8088</value>
     <description>
       The address of the RM web application.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.nodemanager.vmem-check-enabled</name>
     <value>false</value>
     <description>
       Whether virtual memory limits will be enforced for containers.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.log.server.url</name>
     <value>http://localhost:19888/jobhistory/logs</value>
     <description>
       URI for the HistoryServer's log resource
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.resourcemanager.nodes.exclude-path</name>
     <value>/etc/hadoop/conf/yarn.exclude</value>
@@ -367,63 +337,56 @@
       file must be specified.  If the value is empty, no hosts are
       excluded.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.enabled</name>
     <value>true</value>
     <description>Indicate to clients whether timeline service is enabled or not.
       If enabled, clients will put entities and events to the timeline server.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
     <description>
       Store class name for timeline store
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.generic-application-history.store-class</name>
     <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
     <description>
       Store class name for history store, defaulting to file system store
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
     <value>/mnt/glusterfs/hadoop/yarn/timeline</value>
     <description>
       Store file name for leveldb timeline store
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.webapp.address</name>
     <value>0.0.0.0:8188</value>
     <description>
       The http address of the timeline service web application.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.webapp.https.address</name>
     <value>0.0.0.0:8190</value>
     <description>
       The http address of the timeline service web application.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>yarn.timeline-service.address</name>
     <value>0.0.0.0:10200</value>
@@ -431,28 +394,20 @@
       This is default address for the timeline server to start
       the RPC server.
     </description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <description>Enable age off of timeline store data.</description>
     <name>yarn.timeline-service.ttl-enable</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <description>Time to live for timeline store data in milliseconds.</description>
     <name>yarn.timeline-service.ttl-ms</name>
     <value>2678400000</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
   <property>
     <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
     <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
     <value>300000</value>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/oozie-site.xml
index 183b505..b7254bc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/oozie-site.xml
@@ -16,6 +16,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
+
 <configuration supports_final="true">
   <property>
     <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
@@ -39,9 +40,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
     <value>
@@ -65,9 +65,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-action-create</name>
     <value>
@@ -91,9 +90,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
     <value>
@@ -118,9 +116,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-action-start</name>
     <value>
@@ -147,9 +144,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
     <value>
@@ -162,9 +158,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
     <value>
@@ -177,9 +172,8 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+  
   <property>
     <name>oozie.service.HadoopAccessorService.supported.filesystems</name>
     <value>*</value>
@@ -189,18 +183,18 @@
     <value-attributes>
       <type>custom</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+  
   <!--web ui should add following properties to oozie site accordingly to FALCON_USER-->
   <!--<property>-->
-  <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.hosts</name>-->
-  <!--<value>*</value>-->
-  <!--<description>Falcon proxyuser hosts</description>-->
+    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.hosts</name>-->
+    <!--<value>*</value>-->
+    <!--<description>Falcon proxyuser hosts</description>-->
   <!--</property>-->
+
   <!--<property>-->
-  <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.groups</name>-->
-  <!--<value>*</value>-->
-  <!--<description>Falcon proxyuser groups</description>-->
+    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.groups</name>-->
+    <!--<value>*</value>-->
+    <!--<description>Falcon proxyuser groups</description>-->
   <!--</property>-->
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0aeaa956/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
index b191c09..d112137 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/HDFS/configuration/hdfs-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -16,20 +17,21 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
+
 <!-- Put site-specific property overrides in this file. -->
+
 <configuration supports_final="true">
+
   <property>
     <name>dfs.namenode.audit.log.async</name>
     <value>true</value>
     <description>Whether to enable async auditlog</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
   <property>
     <name>dfs.namenode.fslock.fair</name>
     <value>false</value>
     <description>Whether fsLock is fair</description>
-    <on-ambari-upgrade add="true" change="false" delete="false"/>
-    <on-stack-upgrade add="true" change="false" delete="false"/>
   </property>
+
 </configuration>