Posted to commits@ambari.apache.org by dm...@apache.org on 2016/06/09 15:01:50 UTC

[25/70] ambari git commit: AMBARI-17112. Fixed implementation of on-ambari-upgrade support. Update all stack configuration xmls to pass validation. Change defaults (dlysnichenko)
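
Every hunk in this patch applies the same mechanical edit: the explicit update="false" and delete="false" attributes are dropped from each property's <on-ambari-upgrade> element, leaving only add="true" and relying on the changed attribute defaults referenced in the commit title. A minimal before/after sketch of one property, taken from the first hunk below:

    <property>
      <name>mapred_log_dir_prefix</name>
      <value>/var/log/hadoop-mapreduce</value>
      <description>Mapreduce Log Dir Prefix</description>
      <!-- before: all upgrade attributes spelled out explicitly -->
      <on-ambari-upgrade add="true" update="false" delete="false"/>
      <!-- after: update/delete omitted, presumably falling back to the new defaults -->
      <on-ambari-upgrade add="true"/>
    </property>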

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml
index 28277f8..54b4586 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml
@@ -24,13 +24,13 @@
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapred_user</name>
@@ -42,13 +42,13 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>jobhistory_heapsize</name>
     <value>900</value>
     <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- mapred-env.sh -->
   <property>
@@ -71,6 +71,6 @@ export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml
index 10f70e6..0cbe36f 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml
@@ -26,7 +26,7 @@
       The total amount of buffer memory to use while sorting files, in megabytes.
       By default, gives each merge stream 1MB, which should minimize seeks.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
@@ -38,7 +38,7 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.task.io.sort.factor</name>
@@ -47,7 +47,7 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- map/reduce properties -->
   <property>
@@ -56,7 +56,7 @@
     <description>
       Administrators for MapReduce applications.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.parallelcopies</name>
@@ -65,7 +65,7 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.speculative</name>
@@ -74,7 +74,7 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.speculative</name>
@@ -83,7 +83,7 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.job.reduce.slowstart.completedmaps</name>
@@ -92,7 +92,7 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.merge.percent</name>
@@ -103,7 +103,7 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
@@ -112,7 +112,7 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
@@ -121,7 +121,7 @@
       If the job outputs are to compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.input.buffer.percent</name>
@@ -132,7 +132,7 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- copied from kryptonite configuration -->
   <property>
@@ -141,7 +141,7 @@
     <description>
       Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.task.timeout</name>
@@ -151,19 +151,19 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.memory.mb</name>
     <value>1024</value>
     <description>Virtual memory for single Map task</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.memory.mb</name>
     <value>1024</value>
     <description>Virtual memory for single Reduce task</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.shuffle.port</name>
@@ -173,7 +173,7 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
@@ -181,7 +181,7 @@
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
@@ -189,19 +189,19 @@
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.address</name>
     <value>localhost:10020</value>
     <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>localhost:19888</value>
     <description>Enter your JobHistoryServer hostname.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.framework.name</name>
@@ -210,7 +210,7 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
@@ -218,13 +218,13 @@
     <description>
       The staging dir used while submitting jobs.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.resource.mb</name>
     <value>512</value>
     <description>The amount of memory the MR AppMaster needs.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.command-opts</name>
@@ -242,7 +242,7 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -257,25 +257,25 @@
       of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
       mapreduce.reduce.env config settings.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
     <description>This property stores Java options for map tasks.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
     <description>This property stores Java options for reduce tasks.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.application.classpath</name>
@@ -284,7 +284,7 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.am.max-attempts</name>
@@ -295,7 +295,7 @@
       set by resourcemanager. Otherwise, it will be override. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.java.opts</name>
@@ -303,7 +303,7 @@
     <description>
       Larger heap-size for child jvms of maps.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.java.opts</name>
@@ -311,7 +311,7 @@
     <description>
       Larger heap-size for child jvms of reduces.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.map.log.level</name>
@@ -320,7 +320,7 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.reduce.log.level</name>
@@ -329,7 +329,7 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.admin.user.env</name>
@@ -339,7 +339,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>mapreduce.output.fileoutputformat.compress</name>
@@ -347,6 +347,6 @@
     <description>
       Should the job outputs be compressed?
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml
index b117816..3ef5968 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml
@@ -22,7 +22,7 @@
     <description>
       Maximum number of applications that can be pending and running.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
@@ -32,7 +32,7 @@
       application masters i.e. controls number of concurrent running
       applications.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.queues</name>
@@ -40,7 +40,7 @@
     <description>
       The queues at the this level (root is the root queue).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.capacity</name>
@@ -51,13 +51,13 @@
       The child queues capacity should add up to their parent queue's capacity
       or less.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
@@ -65,7 +65,7 @@
     <description>
       Default queue user limit a percentage from 0.0 to 1.0.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
@@ -73,7 +73,7 @@
     <description>
       The maximum capacity of the default queue. 
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.state</name>
@@ -81,7 +81,7 @@
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
@@ -89,7 +89,7 @@
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
@@ -97,7 +97,7 @@
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
@@ -106,7 +106,7 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.node-locality-delay</name>
@@ -117,7 +117,7 @@
       Typically this should be set to number of nodes in the cluster, By default is setting
       approximately number of nodes in one rack which is 40.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
@@ -125,6 +125,6 @@
     <description>
       Default minimum queue resource limit depends on the number of users who have submitted applications.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
index 4bebc19..ca747d0 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml
@@ -24,13 +24,13 @@
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_user</name>
@@ -42,37 +42,37 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>min_user_id</name>
     <value>1000</value>
     <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>apptimelineserver_heapsize</name>
     <value>1024</value>
     <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- yarn-env.sh -->
   <property>
@@ -193,6 +193,6 @@ YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
index 7299f49..7d0a0af 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml
@@ -67,6 +67,6 @@ log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Appl
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
index cefa82a..452bdd8 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml
@@ -23,19 +23,19 @@
     <name>yarn.resourcemanager.hostname</name>
     <value>localhost</value>
     <description>The hostname of the RM.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
     <description> The address of ResourceManager. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.address</name>
     <value>localhost:8030</value>
     <description>The address of the scheduler interface.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.address</name>
@@ -44,19 +44,19 @@
       The address of the applications manager interface in the
       RM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.admin.address</name>
     <value>localhost:8141</value>
     <description>The address of the RM admin interface.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
     <description>The class to use as the resource scheduler.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-mb</name>
@@ -66,7 +66,7 @@
       in MBs. Memory requests lower than this won't take effect,
       and the specified value will get allocated at minimum.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-mb</name>
@@ -76,39 +76,39 @@
       in MBs. Memory requests higher than this won't take effect,
       and will get capped to this value.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.acl.enable</name>
     <value>false</value>
     <description> Are acls enabled. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.admin.acl</name>
     <value/>
     <description> ACL of who can be admin of the YARN cluster. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- NodeManager -->
   <property>
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.memory-mb</name>
     <value>5120</value>
     <description>Amount of physical memory, in MB, that can be allocated
       for containers.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.application.classpath</name>
     <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.vmem-pmem-ratio</name>
@@ -118,32 +118,32 @@
       expressed in terms of physical memory, and virtual memory usage
       is allowed to exceed this allocation by this ratio.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
     <description>ContainerExecutor for launching containers</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
     <value>hadoop</value>
     <description>Unix group of the NodeManager</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.aux-services</name>
     <value>mapreduce_shuffle</value>
     <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
       not start with numbers</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
     <description>The auxiliary service class to use </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-dirs</name>
@@ -155,7 +155,7 @@
       named container_{$contid}. Each container directory will contain the files
       stderr, stdin, and syslog generated by that container.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.local-dirs</name>
@@ -167,7 +167,7 @@
       Individual containers' work directories, called container_${contid}, will
       be subdirectories of this.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-monitor.interval-ms</name>
@@ -176,7 +176,7 @@
       The interval, in milliseconds, for which the node manager
       waits  between two cycles of monitoring its containers' memory usage.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   <property>
@@ -189,13 +189,13 @@
     <name>yarn.nodemanager.health-checker.interval-ms</name>
     <value>135000</value>
     <description>Frequency of running node health script.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
     <value>60000</value>
     <description>Script time out period.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log.retain-second</name>
@@ -204,19 +204,19 @@
       Time in seconds to retain user logs. Only applicable if
       log aggregation is disabled.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.log-aggregation-enable</name>
     <value>true</value>
     <description>Whether to enable log aggregation. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
@@ -225,7 +225,7 @@
       The remote log dir will be created at
       {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
@@ -233,7 +233,7 @@
     <description>
       T-file compression types used to compress aggregated logs.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.delete.debug-delay-sec</name>
@@ -253,7 +253,7 @@
       of the Yarn applications' log directories is configurable with the
       yarn.nodemanager.log-dirs property (see also below).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.log-aggregation.retain-seconds</name>
@@ -262,7 +262,7 @@
       How long to keep aggregation logs before deleting them. -1 disables.
       Be careful set this too small and you will spam the name node.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.admin-env</name>
@@ -271,7 +271,7 @@
       Environment variables that should be forwarded from the NodeManager's
       environment to the container's.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
@@ -283,7 +283,7 @@
       If there are less number of healthy local-dirs (or log-dirs) available,
       then new containers will not be launched on this node.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.am.max-attempts</name>
@@ -296,7 +296,7 @@
       the resourcemanager will override it. The default number is set to 2, to
       allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.address</name>
@@ -304,7 +304,7 @@
     <description>
       The address of the RM web application.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.nodemanager.vmem-check-enabled</name>
@@ -312,7 +312,7 @@
     <description>
       Whether virtual memory limits will be enforced for containers.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.log.server.url</name>
@@ -320,7 +320,7 @@
     <description>
       URI for the HistoryServer's log resource
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.resourcemanager.nodes.exclude-path</name>
@@ -331,7 +331,7 @@
       file must be specified.  If the value is empty, no hosts are
       excluded.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.enabled</name>
@@ -342,7 +342,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.store-class</name>
@@ -350,7 +350,7 @@
     <description>
       Store class name for timeline store
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.generic-application-history.store-class</name>
@@ -358,7 +358,7 @@
     <description>
       Store class name for history store, defaulting to file system store
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
@@ -369,7 +369,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.webapp.address</name>
@@ -377,7 +377,7 @@
     <description>
       The http address of the timeline service web application.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.webapp.https.address</name>
@@ -385,7 +385,7 @@
     <description>
       The http address of the timeline service web application.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>yarn.timeline-service.address</name>
@@ -394,7 +394,7 @@
       This is default address for the timeline server to start
       the RPC server.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <description>Enable age off of timeline store data.</description>
@@ -403,7 +403,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <description>Time to live for timeline store data in milliseconds.</description>
@@ -412,7 +412,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
@@ -421,6 +421,6 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-env.xml
index 5c074a8..d5f6b1b 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -30,49 +30,49 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zk_data_dir</name>
     <value>/hadoop/zookeeper</value>
     <description>Data directory for ZooKeeper.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zk_log_dir</name>
     <value>/var/log/zookeeper</value>
     <description>ZooKeeper Log Dir</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zk_pid_dir</name>
     <value>/var/run/zookeeper</value>
     <description>ZooKeeper Pid Dir</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>tickTime</name>
     <value>2000</value>
     <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>initLimit</name>
     <value>10</value>
     <description>Ticks to allow for sync at Init.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>syncLimit</name>
     <value>5</value>
     <description>Ticks to allow for sync at Runtime.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>clientPort</name>
     <value>2181</value>
     <description>Port for running ZK Server.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- zookeeper-env.sh -->
   <property>
@@ -95,6 +95,6 @@ export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
index c33124f..67ce39c 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -97,6 +97,6 @@ log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
index 31eca9c..594c14a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -22,28 +22,28 @@
   <property>
     <name>fs.AbstractFileSystem.glusterfs.impl</name>
     <value>org.apache.hadoop.fs.local.GlusterFs</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.glusterfs.impl</name>
     <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.glusterfs.volumes</name>
     <description>The name of the gluster volume(s) you would like Hadoop to use.  Values should be seperated by commas i.e. gv0, gv1</description>
     <value>gv0</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.glusterfs.volume.fuse.gv0</name>
     <description>The mount point that corresponds to the fs.glusterfs.volumes value</description>
     <value>/mnt/gv0</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.defaultFS</name>
     <value>glusterfs:///localhost:8020</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
index 96b4f3f..5c1c441 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -24,43 +24,43 @@
     <name>hadoop_pid_dir_prefix</name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>glusterfs_user</name>
     <value>root</value>
     <description/>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>NameNode Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_host</name>
     <value/>
     <description>NameNode Host.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>snamenode_host</name>
     <value/>
     <description>Secondary NameNode.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>proxyuser_group</name>
@@ -70,14 +70,14 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_user</name>
     <display-name>HDFS User</display-name>
     <value>hdfs</value>
     <description>User to run HDFS as</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   <property>
@@ -206,6 +206,6 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-a
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
index d875089..962a800 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -32,7 +32,7 @@
     into /tmp.  Change this configuration else all data will be lost
     on machine restart.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.cluster.distributed</name>
@@ -42,13 +42,13 @@
       false, startup will run all HBase and ZooKeeper daemons together
       in the one JVM.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.port</name>
     <value>60000</value>
     <description>The port the HBase Master should bind to.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.tmp.dir</name>
@@ -61,33 +61,33 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.local.dir</name>
     <value>${hbase.tmp.dir}/local</value>
     <description>Directory on the local filesystem to be used as a local storage
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.info.bindAddress</name>
     <value>0.0.0.0</value>
     <description>The bind address for the HBase Master web UI
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.info.port</name>
     <value>60010</value>
     <description>The port for the HBase Master web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
     <value>60030</value>
     <description>The port for the HBase RegionServer web UI.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.upperLimit</name>
@@ -95,7 +95,7 @@
     <description>Maximum size of all memstores in a region server before new
       updates are blocked and flushes are forced. Defaults to 40% of heap
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.handler.count</name>
@@ -104,7 +104,7 @@
     Same property is used by the Master for count of master handlers.
     Default is 10.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.majorcompaction</name>
@@ -113,7 +113,7 @@
     HStoreFiles in a region.  Default: 1 day.
     Set to 0 to disable automated major compactions.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.global.memstore.lowerLimit</name>
@@ -124,7 +124,7 @@
       the minimum possible flushing to occur when updates are blocked due to
       memstore limiting.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.block.multiplier</name>
@@ -136,7 +136,7 @@
     resultant flush files take a long time to compact or split, or
     worse, we OOME
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.flush.size</name>
@@ -146,7 +146,7 @@
     exceeds this number of bytes.  Value is checked by a thread that runs
     every hbase.server.thread.wakefrequency.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.memstore.mslab.enabled</name>
@@ -157,7 +157,7 @@
       heavy write loads. This can reduce the frequency of stop-the-world
       GC pauses on large heaps.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hregion.max.filesize</name>
@@ -167,7 +167,7 @@
     grown to exceed this value, the hosting HRegion is split in two.
     Default: 1G.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.client.scanner.caching</name>
@@ -179,7 +179,7 @@
     Do not set this value such that the time between invocations is greater
     than the scanner timeout; i.e. hbase.regionserver.lease.period
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zookeeper.session.timeout</name>
@@ -191,7 +191,7 @@
       "The client sends a requested timeout, the server responds with the
       timeout that it can give the client. " In milliseconds.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.client.keyvalue.maxsize</name>
@@ -203,7 +203,7 @@
     to set this to a fraction of the maximum region size. Setting it to zero
     or less disables the check.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.compactionThreshold</name>
@@ -214,7 +214,7 @@
     is run to rewrite all HStoreFiles files as one.  Larger numbers
     put off compaction but when it runs, it takes longer to complete.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.flush.retries.number</name>
@@ -222,7 +222,7 @@
     <description>
     The number of times the region flush operation will be retried.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.hstore.blockingStoreFiles</name>
@@ -233,7 +233,7 @@
     blocked for this HRegion until a compaction is completed, or
     until hbase.hstore.blockingWaitTime has been exceeded.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hfile.block.cache.size</name>
@@ -243,7 +243,7 @@
         used by HFile/StoreFile. Default of 0.25 means allocate 25%.
         Set to 0 to disable but it's not recommended.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- The following properties configure authentication information for
        HBase processes when using Kerberos security.  There are no default
@@ -254,7 +254,7 @@
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HMaster server principal.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.master.kerberos.principal</name>
@@ -265,7 +265,7 @@
     portion, it will be replaced with the actual hostname of the running
     instance.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.keytab.file</name>
@@ -273,7 +273,7 @@
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HRegionServer server principal.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.regionserver.kerberos.principal</name>
@@ -285,7 +285,7 @@
     running instance.  An entry for this principal must exist in the file
     specified in hbase.regionserver.keytab.file
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Additional configuration specific to HBase security -->
   <property>
@@ -295,7 +295,7 @@
     full privileges, regardless of stored ACLs, across the cluster.
     Only used when HBase security is enabled.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.security.authentication</name>
@@ -303,14 +303,14 @@
     <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
       (no authentication), and 'kerberos'.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.security.authorization</name>
     <value>false</value>
     <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.coprocessor.region.classes</name>
@@ -321,7 +321,7 @@
     it in HBase's classpath and add the fully qualified class name here.
     A coprocessor can also be loaded on demand by setting HTableDescriptor.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.coprocessor.master.classes</name>
@@ -333,7 +333,7 @@
       implementing your own MasterObserver, just put it in HBase's classpath
       and add the fully qualified class name here.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.zookeeper.property.clientPort</name>
@@ -341,7 +341,7 @@
     <description>Property from ZooKeeper's config zoo.cfg.
     The port at which the clients will connect.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!--
   The following three properties are used together to create the list of
@@ -357,7 +357,7 @@
     list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
     this is the list of servers which we will start/stop ZooKeeper on.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
   <property>
@@ -370,7 +370,7 @@
     and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
     not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zookeeper.znode.parent</name>
@@ -380,18 +380,18 @@
       By default, all of HBase's ZooKeeper file path are configured with a
       relative path, so they will all go under this directory unless changed.
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hbase.defaults.for.version.skip</name>
     <value>true</value>
     <description>Disables version verification.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
     <description>Path to domain socket.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
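
For reference, every hunk in this file, and in the files that follow, applies the same one-line substitution to the on-ambari-upgrade element. Taken out of diff context, the change reads:

    <!-- before -->
    <on-ambari-upgrade add="true" update="false" delete="false"/>
    <!-- after -->
    <on-ambari-upgrade add="true"/>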

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml
index aed5222..7018010 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml
@@ -26,19 +26,19 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- file system properties -->
   <property>
@@ -48,7 +48,7 @@
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for HDFS.</description>
     <final>true</final>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs.trash.interval</name>
@@ -56,7 +56,7 @@
     <description>Number of minutes between trash checkpoints.
   If zero, the trash feature is disabled.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
@@ -65,7 +65,7 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connection.maxidletime</name>
@@ -73,13 +73,13 @@
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <!-- Web Interface Configuration -->
   <property>
@@ -90,7 +90,7 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
@@ -99,7 +99,7 @@
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.authorization</name>
@@ -107,7 +107,7 @@
     <description>
      Enable authorization for different protocols.
   </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
@@ -157,6 +157,6 @@ If you want to treat all principals from APACHE.ORG with /admin as "admin", your
 RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
 DEFAULT
     </description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
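
Assembled from the context lines above (nothing is added beyond what the hunks show), a complete property definition in this core-site.xml reads as follows after the change:

    <property>
      <name>ipc.client.connect.max.retries</name>
      <value>50</value>
      <description>Defines the maximum number of retries for IPC connections.</description>
      <on-ambari-upgrade add="true"/>
    </property>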

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml
index 424eabb..9649cd0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml
@@ -24,204 +24,204 @@
     <name>namenode_host</name>
     <value/>
     <description>NameNode Host.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>snamenode_host</name>
     <value/>
     <description>Secondary NameNode.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>datanode_hosts</name>
     <value/>
     <description>List of Datanode Hosts.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Data directories for Data Nodes.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_pid_dir_prefix</name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_webhdfs_enabled</name>
     <value>true</value>
     <description>WebHDFS enabled</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>NameNode Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_newsize</name>
     <value>200</value>
     <description>NameNode new generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_maxnewsize</name>
     <value>200</value>
     <description>NameNode maximum new generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_permsize</name>
     <value>128</value>
     <description>NameNode permanent generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_opt_maxpermsize</name>
     <value>256</value>
     <description>NameNode maximum permanent generation size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1073741824</value>
     <description>Reserved space for HDFS</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_failed_volume_tolerated</name>
     <value>0</value>
     <description>DataNode volumes failure toleration</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_namenode_checkpoint_period</name>
     <value>21600</value>
     <description>HDFS Maximum Checkpoint Delay</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>fs_checkpoint_size</name>
     <value>0.5</value>
     <description>FS Checkpoint Size.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
     <description>Proxy user group.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_exclude</name>
     <value/>
     <description>HDFS Exclude hosts.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_replication</name>
     <value>3</value>
     <description>Default Block Replication.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_block_local_path_access_user</name>
     <value>hbase</value>
     <description>Default Block Replication.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_address</name>
     <value>50010</value>
     <description>Port for datanode address.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_http_address</name>
     <value>50075</value>
     <description>Port for datanode address.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir_perm</name>
     <value>750</value>
     <description>Datanode dir perms.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security_enabled</name>
     <value>false</value>
     <description>Hadoop Security</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>kerberos_domain</name>
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>kadmin_pw</name>
     <value/>
     <description>Kerberos realm admin password</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>Kerberos keytab path.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>KeyTab Directory.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>namenode_formatted_mark_dir</name>
     <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
     <description>Formatteed Mark Directory.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <description>User and Groups.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
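
The simplified element is applied uniformly, including to entries that ship with an empty value; reconstructed from the first property of this global.xml, such an entry reads:

    <property>
      <name>namenode_host</name>
      <value/>
      <description>NameNode Host.</description>
      <on-ambari-upgrade add="true"/>
    </property>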

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa1b536d/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hadoop-policy.xml
index 9f53c8f..8e9486d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/hadoop-policy.xml
@@ -26,7 +26,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.client.datanode.protocol.acl</name>
@@ -36,7 +36,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.datanode.protocol.acl</name>
@@ -46,7 +46,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.inter.datanode.protocol.acl</name>
@@ -56,7 +56,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.namenode.protocol.acl</name>
@@ -66,7 +66,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.inter.tracker.protocol.acl</name>
@@ -76,7 +76,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.job.client.protocol.acl</name>
@@ -86,7 +86,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.job.task.protocol.acl</name>
@@ -96,7 +96,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.admin.operations.protocol.acl</name>
@@ -105,7 +105,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
@@ -115,7 +115,7 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>security.refresh.policy.protocol.acl</name>
@@ -125,6 +125,6 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true" update="false" delete="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>