Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/09 15:01:44 UTC

[19/70] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation. Change defaults (dlysnichenko)
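
Every hunk in this patch applies the same one-line substitution: the fully
spelled-out upgrade directive is collapsed to its add flag alone, which reads
as if the schema now defaults update and delete to false (my reading of the
"Change defaults" note above; the patch does not state it explicitly). A
minimal sketch of the pattern, with a made-up property name and value:

  <property>
    <name>example.property</name>
    <value>example-value</value>
    <!-- before: all three upgrade behaviors spelled out explicitly -->
    <!-- <on-ambari-upgrade add="true" update="false" delete="false"/> -->
    <!-- after: update and delete omitted, presumably falling back to
         schema defaults of false -->
    <on-ambari-upgrade add="true"/>
  </property>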

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/configuration/tez-site.xml
index 0c9e348..d865579 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/configuration/tez-site.xml
@@ -24,31 +24,31 @@
       Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
       If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.log.level</name>
     <value>INFO</value>
     <description>Root Logging level passed to the Tez app master</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.generate.debug.artifacts</name>
     <value>false</value>
     <description>Generate debug artifacts such as a text representation of the submitted DAG plan</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.staging-dir</name>
     <value>/tmp/${user.name}/staging</value>
     <description>The staging dir used while submitting DAGs</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.resource.memory.mb</name>
@@ -59,7 +59,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.cmd-opts</name>
@@ -67,13 +67,13 @@
     <description>Java options for the Tez AppMaster process. The Xmx value is derived based on tez.am.resource.memory.mb and is 80% of the value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for the Tez AppMaster process. These will be prepended to the properties specified via tez.am.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.launch.env</name>
@@ -83,7 +83,7 @@
         you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.resource.memory.mb</name>
@@ -101,7 +101,7 @@
         <name>yarn.scheduler.maximum-allocation-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.cmd-opts</name>
@@ -109,13 +109,13 @@
     <description>Java options for tasks. The Xmx value is derived based on tez.task.resource.memory.mb and is 80% of this value by default.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.cluster-default.cmd-opts</name>
     <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description>Cluster default Java options for tasks. These will be prepended to the properties specified via tez.task.launch.cmd-opts</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.launch.env</name>
@@ -125,7 +125,7 @@
       you want to have access to native libraries.
       Used only if the value is not specified explicitly by the DAG definition.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.shuffle-vertex-manager.min-src-fraction</name>
@@ -133,7 +133,7 @@
     <description>In case of a ScatterGather connection, the fraction of source tasks which should
      complete before tasks for the current vertex are scheduled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.shuffle-vertex-manager.max-src-fraction</name>
@@ -142,13 +142,13 @@
       completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
       scheduling on the current vertex scales linearly between min-fraction and max-fraction
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
     <value>250</value>
     <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.split-waves</name>
@@ -157,7 +157,7 @@
       a Vertex. 1.7 with 100% queue available implies generating a number of tasks roughly equal
       to 170% of the available containers on the queue
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.min-size</name>
@@ -165,7 +165,7 @@
     <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
       too many splits
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.grouping.max-size</name>
@@ -173,39 +173,39 @@
     <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
      excessively large splits
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.enabled</name>
     <value>true</value>
    <description>Configuration to specify whether containers should be reused</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.rack-fallback.enabled</name>
     <value>true</value>
     <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.non-local-fallback.enabled</name>
     <value>false</value>
     <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.idle.release-timeout-min.millis</name>
     <value>10000</value>
     <description>The minimum amount of time to hold on to a container that is idle. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.idle.release-timeout-max.millis</name>
     <value>20000</value>
     <description>The maximum amount of time to hold on to a container if no task can be assigned to it immediately. Only active when reuse is enabled.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
@@ -213,13 +213,13 @@
     <description>The amount of time to wait before assigning a container to the next level of
       locality. NODE -&gt; RACK -&gt; NON_LOCAL
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.max.app.attempts</name>
     <value>2</value>
    <description>Specifies the total number of times the app master will run in case recovery is triggered</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.maxtaskfailures.per.node</name>
@@ -227,13 +227,13 @@
     <description>The maximum number of allowed task attempt failures on a node before
       it gets marked as blacklisted
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.am.heartbeat.counter.interval-ms.max</name>
     <value>4000</value>
     <description>Time interval at which task counters are sent to the AM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.get-task.sleep.interval-ms.max</name>
@@ -241,13 +241,13 @@
    <description>The maximum amount of time, in milliseconds, to wait before a task asks an AM for
       another task
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.task.max-events-per-heartbeat</name>
     <value>500</value>
    <description>Maximum number of events to fetch from the AM by the tasks in a single heartbeat.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.session.client.timeout.secs</name>
@@ -255,7 +255,7 @@
     <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
       the client
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.session.am.dag.submit.timeout.secs</name>
@@ -263,19 +263,19 @@
     <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
       before shutting down
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.counters.max</name>
     <value>5000</value>
     <description>The number of allowed counters for the executing DAG</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.counters.max.groups</name>
     <value>1000</value>
     <description>The number of allowed counter groups for the executing DAG</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Configuration for runtime components -->
   <!-- These properties can be set on a per edge basis by configuring the payload for each
@@ -284,7 +284,7 @@
     <name>tez.runtime.compress</name>
     <value>true</value>
     <description>Whether intermediate data should be compressed or not</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.compress.codec</name>
@@ -292,7 +292,7 @@
    <description>The codec to be used if compressing intermediate data. Only
       applicable if tez.runtime.compress is enabled
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.io.sort.mb</name>
@@ -304,7 +304,7 @@
         <name>tez.task.resource.memory.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.unordered.output.buffer.size-mb</name>
@@ -316,7 +316,7 @@
         <name>tez.task.resource.memory.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.history.logging.service.class</name>
@@ -325,19 +325,19 @@
       Set to org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService to log to ATS
       Set to org.apache.tez.dag.history.logging.impl.SimpleHistoryLoggingService to log to the filesystem specified by ${fs.defaultFS}
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.runtime.convert.user-payload.to.history-text</name>
     <value>false</value>
     <description>Whether to publish configuration information to History logger</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.use.cluster.hadoop-libs</name>
     <value>false</value>
     <description>This being true implies that the deployment is relying on hadoop jars being available on the cluster on all nodes.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.am.tez-ui.history-url.template</name>
@@ -346,11 +346,11 @@
        Template replaces __APPLICATION_ID__ with the actual applicationId and
        __HISTORY_URL_BASE__ with the value from the tez.tez-ui.history-url.base config property
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tez.tez-ui.history-url.base</name>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
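
A quick worked reading of the sizing rules described above, with hypothetical
numbers chosen only to illustrate the stated arithmetic:

  <!-- Assuming the 80% Xmx derivation from the tez.am.launch.cmd-opts
       description: a 2048 MB AM container yields roughly -Xmx1638m. -->
  <property>
    <name>tez.am.resource.memory.mb</name>
    <value>2048</value>
  </property>
  <!-- Per the tez.grouping.split-waves description: a factor of 1.7 with
       100 containers available on the queue targets about 170 tasks. -->
  <property>
    <name>tez.grouping.split-waves</name>
    <value>1.7</value>
  </property>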

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-env.xml
index 0132beb..869f44a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-env.xml
@@ -45,6 +45,6 @@ export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
index 57af55b..e34b43e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
@@ -26,7 +26,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.classpath</name>
@@ -35,13 +35,13 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.framework.path</name>
     <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -59,48 +59,48 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
     <value>1</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
     <value>30000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.job.emit-timeline-data</name>
     <value>false</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.bind-host</name>
     <value>0.0.0.0</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
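
The first hunk's description stresses a non-additive env property, and the
yarn.app.mapreduce.am.admin-command-opts description points to the task env
as the place to extend LD_LIBRARY_PATH. A hedged sketch of that alternative
(the value shown is illustrative, not taken from this patch):

  <!-- Extending LD_LIBRARY_PATH for map tasks via the task environment,
       as the description suggests; apply the same to mapreduce.reduce.env. -->
  <property>
    <name>mapreduce.map.env</name>
    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native</value>
  </property>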

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
index 426937d..7443771 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/capacity-scheduler.xml
@@ -35,20 +35,20 @@
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
     <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
     <value>*</value>
     <description/>
     <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
     <value> </value>
     <description/>
     <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
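
Worth noting: each property in this file carries both
<property-type>DONT_ADD_ON_UPGRADE</property-type> and the new
<on-ambari-upgrade add="true"/>. The plain reading (my interpretation, not
spelled out in the patch) is that the property-type marker suppresses adding
these node-label defaults during upgrade even though the generic directive
would otherwise add them:

  <!-- Sketch of the pattern; value as shipped in this file. The
       DONT_ADD_ON_UPGRADE marker is expected to take precedence. -->
  <property>
    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
    <value>*</value>
    <property-type>DONT_ADD_ON_UPGRADE</property-type>
    <on-ambari-upgrade add="true"/>
  </property>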

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-env.xml
index ec7946b..858264f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-env.xml
@@ -39,6 +39,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
index 1b06817..56b8839 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
@@ -22,7 +22,7 @@
     <name>yarn.application.classpath</name>
     <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.registry.rm.enabled</name>
@@ -30,7 +30,7 @@
     <description>
       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
@@ -39,13 +39,13 @@
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
     <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.dir</name>
@@ -54,19 +54,19 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
     <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.recovery.enabled</name>
@@ -75,7 +75,7 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
@@ -87,7 +87,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -98,7 +98,7 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
@@ -107,37 +107,37 @@
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda</value>
    <description>ACLs to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such a wait period gives the RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
     <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
     <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
@@ -147,67 +147,67 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
     <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries: (t0, n0), (t1, n1), ...; the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
     <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
     <description>Pre-requisite to use CGroups</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>hadoop-yarn</value>
     <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
    <description>If true, YARN will automount the CGroup; however, the directory needs to already exist. Otherwise, the cgroup should be mounted by the admin</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
@@ -225,7 +225,7 @@
         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
@@ -238,37 +238,37 @@
       <maximum>100</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.manager-class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value>
    <description>If the user wants to enable this feature, set it to "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager"</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
     <value>-1</value>
    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically when the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
@@ -277,37 +277,37 @@
      This configuration is for debug and test purposes.
      By setting this configuration to true,
      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
@@ -318,7 +318,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.recovery.enabled</name>
@@ -326,13 +326,13 @@
     <description>Enable timeline server to recover state after starting. If
       true, then yarn.timeline-service.state-store-class must be specified.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.state-store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
     <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-state-store.path</name>
@@ -341,7 +341,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
@@ -350,7 +350,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
@@ -358,7 +358,7 @@
     <description>
       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
@@ -366,7 +366,7 @@
     <description>
       Size of cache for recently read entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
@@ -374,7 +374,7 @@
     <description>
       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.type</name>
@@ -383,13 +383,13 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
@@ -400,31 +400,31 @@
       tokens(fallback to kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.bind-host</name>
     <value>0.0.0.0</value>
    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-vcores</name>
@@ -443,7 +443,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-vcores</name>
@@ -462,7 +462,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.enabled</name>
@@ -485,14 +485,14 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.node-labels.manager-class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value>
    <description>If the user wants to enable this feature, set it to "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager"</description>
     <deleted>true</deleted>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
@@ -508,7 +508,7 @@
         <name>hadoop.security.authentication</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
@@ -524,7 +524,7 @@
         <name>user_group</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
@@ -544,6 +544,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
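
Pulling together the recovery-related descriptions above (recovery.enabled
requires a store class, and the store.class description notes the ZooKeeper
store is implicitly fenced), a minimal hedged sketch of a ZooKeeper-backed RM
recovery setup; hostnames are placeholders. Note also how the
retry-policy-spec pairs read: a value of "2000, 500" means the first 500
retries sleep about 2000 ms each.

  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <!-- Required once recovery is enabled, per the description above. -->
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <!-- Placeholder quorum; comma-separated host:port pairs as documented. -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
  </property>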

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
index ef97390..b062601 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
@@ -22,65 +22,65 @@
     <name>fs.defaultFS</name>
     <value/>
     <description>Provide VIPRFS bucket details using the format viprfs://$BUCKET_NAME.$NAMESPACE.$SITE_NAME_from_fs.vipr.installations</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
     <value>simple</value>
     <description>Supported values: simple, kerberos</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authorization</name>
     <value>false</value>
    <description>Supported values: true, false</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
     <value>DEFAULT</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.permissions.umask-mode</name>
     <value>022</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- VIPRFS Configurations -->
   <property>
     <name>fs.vipr.installations</name>
     <value>Site1</value>
     <description>Provide site name of the tenant</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.vipr.installation.Site1.hosts</name>
     <value/>
     <description>Provide ECS node IPs or VIP</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.vipr.installation.Site1.resolution</name>
     <value>dynamic</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.vipr.installation.Site1.resolution.dynamic.time_to_live_ms</name>
     <value>900000</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.viprfs.auth.anonymous_translation</name>
     <value>LOCAL_USER</value>
     <final>true</final>
     <description>Supported values are LOCAL_USER. Applicable only for insecure cluster deployment.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.viprfs.auth.identity_translation</name>
     <value>NONE</value>
    <description>Supported values are NONE (default), FIXED_REALM, and CURRENT_USER_REALM</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--  Moving the configuration to kerberos.json as this is applicable to only secure cluster
     <property>
@@ -93,30 +93,30 @@
     <name>fs.viprfs.impl</name>
     <value>com.emc.hadoop.fs.vipr.ViPRFileSystem</value>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.AbstractFileSystem.viprfs.impl</name>
     <value>com.emc.hadoop.fs.vipr.ViPRAbstractFileSystem</value>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.trace.viprfs.dfs.impl</name>
     <value>com.emc.hadoop.fs.trace.TraceDistributedFileSystem</value>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.trace.viprfs.dfs.inner</name>
     <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.viprfs.dfs.impl</name>
     <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
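
The fs.defaultFS description above gives the URI format but the shipped value
is blank. A filled-in sketch in which every concrete segment is a placeholder
(bucket "mybucket", namespace "ns1", and the host IPs are invented; "Site1"
matches the shipped fs.vipr.installations value):

  <property>
    <!-- viprfs://$BUCKET_NAME.$NAMESPACE.$SITE_NAME, per the description -->
    <name>fs.defaultFS</name>
    <value>viprfs://mybucket.ns1.Site1</value>
  </property>
  <property>
    <name>fs.vipr.installation.Site1.hosts</name>
    <value>10.0.0.11,10.0.0.12</value>
  </property>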

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml
index fa3cf78..692ab18 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hadoop-env.xml
@@ -24,7 +24,7 @@
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>proxyuser_group</name>
@@ -35,7 +35,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_user</name>
@@ -46,7 +46,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>user_group</name>
@@ -56,7 +56,7 @@
     <value-attributes>
       <type>user</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- hadoop-env.sh -->
   <property>
@@ -143,6 +143,6 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-a
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml
index 3b548cb..ee7939f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/hdfs-site.xml
@@ -23,11 +23,11 @@
   <property>
     <name>dfs.permissions.enabled</name>
     <value>true</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.permissions.superusergroup</name>
     <value>hdfs</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml
index ef69fba..0d42f28 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-env.xml
@@ -105,6 +105,6 @@ export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml
index 3a83f88..e1b6faf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/configuration/hbase-site.xml
@@ -23,6 +23,6 @@
   <property>
     <name>hbase.rootdir</name>
     <value/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml
index a5f1067..07e8d8b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/TEZ/configuration/tez-site.xml
@@ -21,6 +21,6 @@
     <name>tez.cluster.additional.classpath.prefix</name>
     <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure:/usr/lib/hadoop/lib/*</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml
index 82ae995..5016f50 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration-mapred/mapred-site.xml
@@ -25,6 +25,6 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml
index a9d5db9..7e518db 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/configuration/yarn-site.xml
@@ -22,6 +22,6 @@
     <name>yarn.application.classpath</name>
     <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*,/usr/lib/hadoop/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
index 2e15558..e25ea81 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
@@ -24,38 +24,38 @@
     <name>audit_log_level</name>
     <value>OFF</value>
     <description>Log level for audit logging</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>monitor_forwarding_log_level</name>
     <value>WARN</value>
     <description>Log level for logging forwarded to the Accumulo
       Monitor</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>debug_log_size</name>
     <value>512M</value>
     <description>Size of each debug rolling log file</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>debug_num_logs</name>
     <value>10</value>
     <description>Number of rolling debug log files to keep</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>info_log_size</name>
     <value>512M</value>
     <description>Size of each info rolling log file</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>info_num_logs</name>
     <value>10</value>
     <description>Number of rolling info log files to keep</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>content</name>
@@ -109,6 +109,6 @@ log4j.appender.A1.layout=org.apache.log4j.PatternLayout
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
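
The six scalar properties above (log levels, file sizes, retention counts) exist to be interpolated into the log4j template held by the content property that closes this file; the {{...}} placeholder syntax is the same one visible in the hbase-env hunk earlier in this patch. A minimal sketch of that wiring follows, where the MaxFileSize/MaxBackupIndex substitution points are an assumption rather than a copy of the real template (only the appender name A1 and PatternLayout appear in the hunk context above):

    <property>
      <name>content</name>
      <value>
    # hypothetical excerpt: one rolling appender sized by the properties above
    log4j.appender.A1=org.apache.log4j.RollingFileAppender
    log4j.appender.A1.MaxFileSize={{debug_log_size}}
    log4j.appender.A1.MaxBackupIndex={{debug_num_logs}}
    log4j.appender.A1.layout=org.apache.log4j.PatternLayout
      </value>
      <value-attributes>
        <type>content</type>
      </value-attributes>
      <on-ambari-upgrade add="true"/>
    </property>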

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index 6b79265..60d1fe6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -23,25 +23,25 @@
     <name>fs.AbstractFileSystem.glusterfs.impl</name>
     <value>org.apache.hadoop.fs.local.GlusterFs</value>
     <display-name>GlusterFS Abstract File System Implementation</display-name>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.glusterfs.impl</name>
     <display-name>GlusterFS fs impl</display-name>
     <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.defaultFS</name>
     <value>glusterfs:///localhost:8020</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- HDFS core-site props and additional props (not sure if all are needed or not)-->
   <property>
     <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
     <value>120</value>
     <description>ZooKeeper Failover Controller retries setting for your environment</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- i/o properties -->
   <property>
@@ -51,21 +51,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
     <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.trash.interval</name>
@@ -76,7 +76,7 @@
         If trash is disabled server side then the client side configuration is checked.
         If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
@@ -85,7 +85,7 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connection.maxidletime</name>
@@ -93,13 +93,13 @@
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.server.tcpnodelay</name>
@@ -110,7 +110,7 @@
       decrease latency
       with a cost of more/smaller packets.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Web Interface Configuration -->
   <property>
@@ -121,7 +121,7 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
@@ -130,7 +130,7 @@
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authorization</name>
@@ -138,7 +138,7 @@
     <description>
      Enable authorization for different protocols.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
@@ -181,7 +181,7 @@ If you want to treat all principals from APACHE.ORG with /admin as "admin", your
 RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
 DEFAULT
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>net.topology.script.file.name</name>
@@ -189,6 +189,6 @@ DEFAULT
     <description>
       Location of topology script used by Hadoop to determine the rack location of nodes.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
index a55b5c3..f005a75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -30,7 +30,7 @@
       <editable-only-at-install>true</editable-only-at-install>
       <visible>false</visible>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
@@ -40,7 +40,7 @@
     <value-attributes>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>glusterfs_user</name>
@@ -50,7 +50,7 @@
     <value-attributes>
       <visible>false</visible>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
@@ -62,7 +62,7 @@
       <editable-only-at-install>true</editable-only-at-install>
       <visible>false</visible>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
@@ -74,19 +74,19 @@
       <editable-only-at-install>true</editable-only-at-install>
       <visible>false</visible>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_host</name>
     <value/>
     <description>NameNode Host.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>snamenode_host</name>
     <value/>
     <description>Secondary NameNode.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>proxyuser_group</name>
@@ -96,14 +96,14 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_user</name>
     <display-name>HDFS User</display-name>
     <value>hdfs</value>
     <description>User to run HDFS as</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   <property>
@@ -232,6 +232,6 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-a
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
index c7bc145..8d90dd4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -32,7 +32,7 @@
     into /tmp.  Change this configuration else all data will be lost
     on machine restart.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.cluster.distributed</name>
@@ -42,13 +42,13 @@
       false, startup will run all HBase and ZooKeeper daemons together
       in the one JVM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.port</name>
     <value>16000</value>
     <description>The port the HBase Master should bind to.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.tmp.dir</name>
@@ -62,33 +62,33 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.local.dir</name>
     <value>${hbase.tmp.dir}/local</value>
     <description>Directory on the local filesystem to be used as a local storage
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.info.bindAddress</name>
     <value>0.0.0.0</value>
     <description>The bind address for the HBase Master web UI
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.info.port</name>
     <value>16010</value>
     <description>The port for the HBase Master web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
     <value>16030</value>
     <description>The port for the HBase RegionServer web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.upperLimit</name>
@@ -96,7 +96,7 @@
     <description>Maximum size of all memstores in a region server before new
       updates are blocked and flushes are forced. Defaults to 40% of heap
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.handler.count</name>
@@ -105,7 +105,7 @@
     Same property is used by the Master for count of master handlers.
     Default is 10.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.majorcompaction</name>
@@ -114,7 +114,7 @@
     HStoreFiles in a region.  Default: 1 day.
     Set to 0 to disable automated major compactions.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.lowerLimit</name>
@@ -125,7 +125,7 @@
       the minimum possible flushing to occur when updates are blocked due to
       memstore limiting.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.block.multiplier</name>
@@ -137,7 +137,7 @@
     resultant flush files take a long time to compact or split, or
     worse, we OOME
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.flush.size</name>
@@ -147,7 +147,7 @@
     exceeds this number of bytes.  Value is checked by a thread that runs
     every hbase.server.thread.wakefrequency.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.mslab.enabled</name>
@@ -158,7 +158,7 @@
       heavy write loads. This can reduce the frequency of stop-the-world
       GC pauses on large heaps.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.max.filesize</name>
@@ -168,7 +168,7 @@
     grown to exceed this value, the hosting HRegion is split in two.
     Default: 1G.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.client.scanner.caching</name>
@@ -180,7 +180,7 @@
     Do not set this value such that the time between invocations is greater
     than the scanner timeout; i.e. hbase.regionserver.lease.period
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zookeeper.session.timeout</name>
@@ -192,7 +192,7 @@
       "The client sends a requested timeout, the server responds with the
       timeout that it can give the client. " In milliseconds.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.client.keyvalue.maxsize</name>
@@ -204,7 +204,7 @@
     to set this to a fraction of the maximum region size. Setting it to zero
     or less disables the check.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.compactionThreshold</name>
@@ -215,7 +215,7 @@
     is run to rewrite all HStoreFiles files as one.  Larger numbers
     put off compaction but when it runs, it takes longer to complete.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.flush.retries.number</name>
@@ -223,7 +223,7 @@
     <description>
     The number of times the region flush operation will be retried.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.blockingStoreFiles</name>
@@ -234,7 +234,7 @@
     blocked for this HRegion until a compaction is completed, or
     until hbase.hstore.blockingWaitTime has been exceeded.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hfile.block.cache.size</name>
@@ -244,7 +244,7 @@
         used by HFile/StoreFile. Default of 0.25 means allocate 25%.
         Set to 0 to disable but it's not recommended.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- The following properties configure authentication information for
        HBase processes when using Kerberos security.  There are no default
@@ -255,7 +255,7 @@
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HMaster server principal.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.kerberos.principal</name>
@@ -266,7 +266,7 @@
     portion, it will be replaced with the actual hostname of the running
     instance.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.keytab.file</name>
@@ -274,7 +274,7 @@
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HRegionServer server principal.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.kerberos.principal</name>
@@ -286,7 +286,7 @@
     running instance.  An entry for this principal must exist in the file
     specified in hbase.regionserver.keytab.file
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Additional configuration specific to HBase security -->
   <property>
@@ -296,7 +296,7 @@
     full privileges, regardless of stored ACLs, across the cluster.
     Only used when HBase security is enabled.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.security.authentication</name>
@@ -304,14 +304,14 @@
     <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
       (no authentication), and 'kerberos'.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.security.authorization</name>
     <value>false</value>
     <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.coprocessor.region.classes</name>
@@ -322,7 +322,7 @@
     it in HBase's classpath and add the fully qualified class name here.
     A coprocessor can also be loaded on demand by setting HTableDescriptor.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.coprocessor.master.classes</name>
@@ -334,7 +334,7 @@
       implementing your own MasterObserver, just put it in HBase's classpath
       and add the fully qualified class name here.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
@@ -342,7 +342,7 @@
     <description>Property from ZooKeeper's config zoo.cfg.
     The port at which the clients will connect.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   The following three properties are used together to create the list of
@@ -358,7 +358,7 @@
     list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
     this is the list of servers which we will start/stop ZooKeeper on.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
   <property>
@@ -371,7 +371,7 @@
     and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
     not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zookeeper.znode.parent</name>
@@ -381,19 +381,19 @@
       By default, all of HBase's ZooKeeper file path are configured with a
       relative path, so they will all go under this directory unless changed.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.defaults.for.version.skip</name>
     <value>true</value>
     <description>Disables version verification.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
     <description>Path to domain socket.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.coprocessor.regionserver.classes</name>
@@ -404,6 +404,6 @@
         <name>hbase.security.authorization</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
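
Taken together, these hunks only pass validation if the dropped attributes have defaults declared somewhere in the schema. One way such defaults could be expressed is sketched in the XSD fragment below; this is an illustration of the idea, not Ambari's actual validation logic, and the use="required" on add is likewise an assumption:

    <xs:element name="on-ambari-upgrade">
      <xs:complexType>
        <xs:attribute name="add"    type="xs:boolean" use="required"/>
        <xs:attribute name="update" type="xs:boolean" default="false"/>
        <xs:attribute name="delete" type="xs:boolean" default="false"/>
      </xs:complexType>
    </xs:element>

With defaults in place, a parser treats <on-ambari-upgrade add="true"/> and the older three-attribute spelling as equivalent, which is what lets every property block in this patch shed two attributes without changing upgrade behavior.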