Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/11 16:26:12 UTC

[39/50] [abbrv] ambari git commit: AMBARI-21431. Update BigInsight configuration files to be compliant with XSD
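
The change is mechanical: Ambari's configuration XSD requires an <on-ambari-upgrade> element inside every <property> definition, and this commit adds one to each property in the BigInsights 4.2 stack files below. A minimal sketch of the compliant shape, using a hypothetical property name and value rather than one taken from this commit:

    <property>
      <name>example.property</name>
      <value>example-value</value>
      <description>Hypothetical property; the on-ambari-upgrade element is what the XSD requires.</description>
      <on-ambari-upgrade add="true"/>
    </property>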

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
index 0ca6807..dd45141 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
@@ -58,6 +58,7 @@ index.search.solr.mode=cloud
 index.search.solr.zookeeper-url={{solr_server_host}}/solr
 index.search.solr.configset=titan
     </value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
index 3363d81..f61a479 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
@@ -57,6 +57,7 @@
       # log4j level means it will be at WARN by default, which is ideal.
       log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
     </value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
index 9a9d6ee..1536ff6 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-env.xml
@@ -26,6 +26,7 @@
     <display-name>Mapreduce Log Dir Prefix</display-name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -37,6 +38,7 @@
     <display-name>Mapreduce PID Dir Prefix</display-name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -46,6 +48,7 @@
   <property>
     <name>mapred_user</name>
     <value>mapred</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>Mapreduce User</description>
   </property>
@@ -54,6 +57,7 @@
     <display-name>History Server heap size</display-name>
     <value>900</value>
     <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <unit>MB</unit>
       <type>int</type>
@@ -64,12 +68,14 @@
     <name>mapred_user_nofile_limit</name>
     <value>32768</value>
     <description>Max open files limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapred_user_nproc_limit</name>
     <value>65536</value>
     <description>Max number of processes limit setting for MAPREDUCE user.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <!-- mapred-env.sh -->
@@ -92,5 +98,6 @@ export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
 export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
 export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-site.xml
index 7d58e23..c11b459 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration-mapred/mapred-site.xml
@@ -32,6 +32,7 @@
       By default, gives each merge stream 1MB, which should minimize seeks.
     </description>
     <display-name>Sort Allocation Memory</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -57,6 +58,7 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -66,6 +68,7 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 <!-- map/reduce properties -->
@@ -75,6 +78,7 @@
     <description>
       Administrators for MapReduce applications.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -84,6 +88,7 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -93,6 +98,7 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -102,6 +108,7 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -111,6 +118,7 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -119,6 +127,7 @@
     <description>
       Limit on the number of counters allowed per job.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -130,6 +139,7 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -139,6 +149,7 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -148,6 +159,7 @@
       If the job outputs are to compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -159,6 +171,7 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- copied from kryptonite configuration -->
@@ -168,6 +181,7 @@
     <description>
       Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -178,6 +192,7 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -185,6 +200,7 @@
     <value>1024</value>
     <description>Virtual memory for single Map task</description>
     <display-name>Map Memory</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>512</minimum>
@@ -209,6 +225,7 @@
     <value>1024</value>
     <description>Virtual memory for single Reduce task</description>
     <display-name>Reduce Memory</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>512</minimum>
@@ -236,6 +253,7 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -244,6 +262,7 @@
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -252,23 +271,27 @@
     <description>
       Directory where history files are managed by the MR JobHistory Server.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>mapreduce.jobhistory.http.policy</name>
     <value>HTTP_ONLY</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>       
     <name>mapreduce.jobhistory.address</name>
     <value>localhost:10020</value>
     <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>       
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>localhost:19888</value>
     <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -278,6 +301,7 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -286,6 +310,7 @@
     <description>
       The staging dir used while submitting jobs.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -293,6 +318,7 @@
     <value>512</value>
     <description>The amount of memory the MR AppMaster needs.</description>
     <display-name>AppMaster Memory</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>512</minimum>
@@ -329,6 +355,7 @@
       mapreduce.reduce.env config settings.
     </description>
     <display-name>MR AppMaster Java Heap Size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>mapred-site</type>
@@ -351,6 +378,7 @@
       mapreduce.reduce.env config settings.
     </description>
     <display-name>MR AppMaster Java Heap Size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>mapred-site</type>
@@ -363,18 +391,21 @@
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN -Diop.version=${iop.version}</value>
     <description>This property stores Java options for map tasks.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN -Diop.version=${iop.version}</value>
     <description>This property stores Java options for reduce tasks.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -384,12 +415,14 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.application.framework.path</name>
     <value>/iop/apps/${iop.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 
@@ -402,6 +435,7 @@
       set by resourcemanager. Otherwise, it will be override. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -411,6 +445,7 @@
       Larger heap-size for child jvms of maps.
     </description>
     <display-name>MR Map Java Heap Size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>mapred-site</type>
@@ -426,6 +461,7 @@
       Larger heap-size for child jvms of reduces.
     </description>
     <display-name>MR Reduce Java Heap Size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>mapred-site</type>
@@ -441,6 +477,7 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -450,6 +487,7 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -460,6 +498,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -468,6 +507,7 @@
     <description>
       User added environment variables for the MR App Master processes.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -476,6 +516,7 @@
     <description>
       Should the job outputs be compressed?
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -485,6 +526,7 @@
       server state upon startup.  If enabled then
       mapreduce.jobhistory.recovery.store.class must be specified.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -493,6 +535,7 @@
     <description>The HistoryServerStateStoreService class to store history server
       state for recovery.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -501,36 +544,42 @@
     <description>The URI where history server state will be stored if HistoryServerLeveldbSystemStateStoreService
       is configured as the recovery storage class.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
     <value>1</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
     <value>1000</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
     <value>30000</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.job.emit-timeline-data</name>
     <value>false</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>mapreduce.jobhistory.bind-host</name>
     <value>0.0.0.0</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/capacity-scheduler.xml
index d26c67f..0fa0e99 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/capacity-scheduler.xml
@@ -23,6 +23,7 @@
     <description>
       Maximum number of applications that can be pending and running.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -33,6 +34,7 @@
       application masters i.e. controls number of concurrent running
       applications.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -41,6 +43,7 @@
     <description>
       The queues at the this level (root is the root queue).
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -52,12 +55,14 @@
       The child queues capacity should add up to their parent queue's capacity
       or less.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>yarn.scheduler.capacity.root.default.capacity</name>
     <value>100</value>
     <description>Default queue target capacity.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -66,6 +71,7 @@
     <description>
       Default queue user limit a percentage from 0.0 to 1.0.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -74,6 +80,7 @@
     <description>
       The maximum capacity of the default queue. 
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -82,6 +89,7 @@
     <description>
       The state of the default queue. State can be one of RUNNING or STOPPED.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -90,6 +98,7 @@
     <description>
       The ACL of who can submit jobs to the default queue.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -98,6 +107,7 @@
     <description>
       The ACL of who can administer jobs on the default queue.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -107,6 +117,7 @@
       The ACL for who can administer this queue i.e. change sub-queue 
       allocations.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
   
   <property>
@@ -118,6 +129,7 @@
       Typically this should be set to number of nodes in the cluster, By default is setting
       approximately number of nodes in one rack which is 40.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
   
   <property>
@@ -126,12 +138,14 @@
     <description>
       Default minimum queue resource limit depends on the number of users who have submitted applications.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
   
   <property>
     <name>yarn.scheduler.capacity.resource-calculator</name>
     <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
     <display-name>CPU Scheduling</display-name>
+    <on-ambari-upgrade add="false"/>
     <value-attributes>
       <type>value-list</type>
       <entries>
@@ -152,6 +166,7 @@
     <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
     <value>*</value>
     <description></description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>
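
Note that capacity-scheduler.xml uses add="false" where most other files in this commit use add="true". Reading from the pattern (the exact semantics belong to Ambari and are not spelled out in this message), the add attribute appears to control whether Ambari inserts a missing property into existing cluster configurations during an Ambari server upgrade:

    <on-ambari-upgrade add="true"/>   <!-- add the property during Ambari upgrade if it is absent -->
    <on-ambari-upgrade add="false"/>  <!-- do not add automatically; presumably because queue layout is cluster-specific -->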

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-audit.xml
index d9de749..d005eb0 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-audit.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-audit.xml
@@ -24,6 +24,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -31,6 +32,7 @@
     <value>false</value>
     <display-name>Audit to DB</display-name>
     <description>Is Audit to DB enabled?</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -46,17 +48,20 @@
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.audit.destination.db.password</name>
     <value>crypted</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>Audit DB JDBC Password</description>
   </property>
@@ -65,18 +70,21 @@
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/db/spool</value>
     <description>/var/log/hadoop/yarn/audit/db/spool</description>
+    <on-ambari-upgrade add="true"/>
   </property>  
 
   <property>
@@ -84,6 +92,7 @@
     <value>true</value>
     <display-name>Audit to HDFS</display-name>
     <description>Is Audit to HDFS enabled?</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -99,6 +108,7 @@
     <name>xasecure.audit.destination.hdfs.dir</name>
     <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
     <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ranger-env</type>
@@ -111,6 +121,7 @@
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
     <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
  <!-- Removing auditing to Solr   
@@ -119,6 +130,7 @@
     <value>false</value>
     <display-name>Audit to SOLR</display-name>
     <description>Is Solr audit enabled?</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -134,6 +146,7 @@
     <name>xasecure.audit.destination.solr.urls</name>
     <value></value>
     <description>Solr URL</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -149,6 +162,7 @@
     <name>xasecure.audit.destination.solr.zookeepers</name>
     <value>NONE</value>
     <description>Solr Zookeeper string</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ranger-admin-site</type>
@@ -161,6 +175,7 @@
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/solr/spool</value>
     <description>/var/log/hadoop/yarn/audit/solr/spool</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   --> 
   
@@ -169,6 +184,7 @@
     <value>false</value>
     <display-name>Audit provider summary enabled</display-name>
     <description>Enable Summary audit?</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-plugin-properties.xml
index 0f220c4..9b5f869 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -25,12 +25,14 @@
     <value>ambari-qa</value>
     <display-name>Policy user for YARN</display-name>
     <description>This user must be system user and also present at Ranger admin portal</description>
+    <on-ambari-upgrade add="true"/>
   </property> 
 
   <property>
     <name>hadoop.rpc.protection</name>
     <value></value>
     <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -40,6 +42,7 @@
     <name>common.name.for.certificate</name>
     <value></value>
     <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -50,6 +53,7 @@
     <value>No</value>
     <display-name>Enable Ranger for YARN</display-name>
     <description>Enable ranger yarn plugin ?</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ranger-env</type>
@@ -67,12 +71,14 @@
     <value>yarn</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
     <value>yarn</value>
     <display-name>Ranger repository config password</display-name>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>Used for repository creation on ranger admin</description>
   </property> 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
index ad69775..2ce7063 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
@@ -24,11 +24,13 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
     <value>myKeyFilePassword</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>password for keystore</description>
   </property>
@@ -37,11 +39,13 @@
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
     <description>java truststore file</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
     <value>changeit</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>java truststore password</description>
   </property>
@@ -50,12 +54,14 @@
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-security.xml
index c5eb0b5..54ee6fb 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-security.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/ranger-yarn-security.xml
@@ -24,36 +24,42 @@
     <name>ranger.plugin.yarn.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this Yarn instance</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.yarn.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.yarn.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
     <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.yarn.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
index af2f49b..55384cc 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-env.xml
@@ -26,6 +26,7 @@
     <display-name>YARN Log Dir Prefix</display-name>
     <value>/var/log/hadoop-yarn</value>
     <description>YARN Log Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -37,6 +38,7 @@
     <display-name>YARN PID Dir Prefix</display-name>
     <value>/var/run/hadoop-yarn</value>
     <description>YARN PID Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -46,6 +48,7 @@
   <property>
     <name>yarn_user</name>
     <value>yarn</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>YARN User</description>
   </property>
@@ -54,6 +57,7 @@
     <display-name>YARN Java heap size</display-name>
     <value>1024</value>
     <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <unit>MB</unit>
@@ -64,6 +68,7 @@
     <display-name>ResourceManager Java heap size</display-name>
     <value>1024</value>
     <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <overridable>false</overridable>
@@ -75,6 +80,7 @@
     <display-name>NodeManager Java heap size</display-name>
     <value>1024</value>
     <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <unit>MB</unit>
@@ -85,6 +91,7 @@
     <display-name>Minimum user ID for submitting job</display-name>
     <value>1000</value>
     <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -94,6 +101,7 @@
     <display-name>AppTimelineServer Java heap size</display-name>
     <value>1024</value>
     <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <overridable>false</overridable>
       <unit>MB</unit>
@@ -105,18 +113,21 @@
     <name>is_supported_yarn_ranger</name>
     <value>true</value>
     <description>Set to false by default,  needs to be set to true in stacks that use Ranger Yarn Plugin</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn_user_nofile_limit</name>
     <value>32768</value>
     <description>Max open files limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn_user_nproc_limit</name>
     <value>65536</value>
     <description>Max number of processes limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- yarn-env.sh -->
@@ -245,6 +256,7 @@
       YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
       YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-log4j.xml
index bbcd524..1127dc2 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-log4j.xml
@@ -75,6 +75,7 @@ log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
     </value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-site.xml
index 34799f4..0bab493 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/YARN/configuration/yarn-site.xml
@@ -27,18 +27,21 @@
     <name>yarn.resourcemanager.hostname</name>
     <value>localhost</value>
     <description>The hostname of the RM.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
     <description> The address of ResourceManager. </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>yarn.resourcemanager.scheduler.address</name>
     <value>localhost:8030</value>
     <description>The address of the scheduler interface.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -48,18 +51,21 @@
       The address of the applications manager interface in the
       RM.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>yarn.resourcemanager.admin.address</name>
     <value>localhost:8141</value>
     <description>The address of the RM admin interface.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
     <description>The class to use as the resource scheduler.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -71,6 +77,7 @@
       and the specified value will get allocated at minimum.
     </description>
     <display-name>Minimum Container Size (Memory)</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -95,6 +102,7 @@
       and will get capped to this value.
     </description>
     <display-name>Maximum Container Size (Memory)</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -114,6 +122,7 @@
     <name>yarn.acl.enable</name>
     <value>false</value>
     <description> Are acls enabled. </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -128,6 +137,7 @@
   <property>
     <name>yarn.authorization-provider</name>
     <description> Yarn authorization provider class. </description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>ranger-yarn-plugin-properties</type>
@@ -140,6 +150,7 @@
     <name>yarn.admin.acl</name>
     <value>yarn</value>
     <description> ACL of who can be admin of the YARN cluster. </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
@@ -151,6 +162,7 @@
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -159,6 +171,7 @@
     <description>Amount of physical memory, in MB, that can be allocated
       for containers.</description>
     <display-name>Memory allocated for all YARN containers on a node</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -172,6 +185,7 @@
     <name>yarn.application.classpath</name>
     <value>/etc/hadoop/conf,/usr/iop/current/hadoop-client/*,/usr/iop/current/hadoop-client/lib/*,/usr/iop/current/hadoop-hdfs-client/*,/usr/iop/current/hadoop-hdfs-client/lib/*,/usr/iop/current/hadoop-yarn-client/*,/usr/iop/current/hadoop-yarn-client/lib/*,/usr/lib/hadoop-lzo/lib/*</value>
     <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -180,6 +194,7 @@
     <description>
       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -194,6 +209,7 @@
       is allowed to exceed this allocation by this ratio.
     </description>
     <display-name>Virtual Memory Ratio</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>float</type>
       <minimum>0.1</minimum>
@@ -206,6 +222,7 @@
     <name>yarn.nodemanager.container-executor.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
     <description>ContainerExecutor for launching containers</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -218,6 +235,7 @@
     <name>yarn.nodemanager.linux-container-executor.group</name>
     <value>hadoop</value>
     <description>Unix group of the NodeManager</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -231,12 +249,14 @@
     <value>mapreduce_shuffle</value>
     <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
       not start with numbers</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
     <description>The auxiliary service class to use </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -249,6 +269,7 @@
       named container_{$contid}. Each container directory will contain the files
       stderr, stdin, and syslog generated by that container.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directories</type>
     </value-attributes>
@@ -264,6 +285,7 @@
       Individual containers' work directories, called container_${contid}, will
       be subdirectories of this.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directories</type>
     </value-attributes>
@@ -276,6 +298,7 @@
       The interval, in milliseconds, for which the node manager
       waits  between two cycles of monitoring its containers' memory usage.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!--
@@ -283,6 +306,7 @@
     <name>yarn.nodemanager.health-checker.script.path</name>
     <value>/etc/hadoop/conf/health_check_nodemanager</value>
     <description>The health check script to run.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
    -->
 
@@ -290,12 +314,14 @@
     <name>yarn.nodemanager.health-checker.interval-ms</name>
     <value>135000</value>
     <description>Frequency of running node health script.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
     <value>60000</value>
     <description>Script time out period.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -305,6 +331,7 @@
       Time in seconds to retain user logs. Only applicable if
       log aggregation is disabled.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -312,6 +339,7 @@
     <value>true</value>
     <description>Whether to enable log aggregation. </description>
     <display-name>Enable Log Aggregation</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -321,6 +349,7 @@
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
@@ -333,6 +362,7 @@
       The remote log dir will be created at
       {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -341,6 +371,7 @@
     <description>
       T-file compression types used to compress aggregated logs.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -361,6 +392,7 @@
       of the Yarn applications' log directories is configurable with the
       yarn.nodemanager.log-dirs property (see also below).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -370,6 +402,7 @@
       How long to keep aggregation logs before deleting them. -1 disables.
       Be careful set this too small and you will spam the name node.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -379,6 +412,7 @@
       Environment variables that should be forwarded from the NodeManager's
       environment to the container's.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -391,6 +425,7 @@
       If there are less number of healthy local-dirs (or log-dirs) available,
       then new containers will not be launched on this node.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -404,6 +439,7 @@
       the resourcemanager will override it. The default number is set to 2, to
       allow at least one retry for AM.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -412,6 +448,7 @@
     <description>
       The address of the RM web application.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -420,6 +457,7 @@
     <description>
       The https address of the RM web application.
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -428,6 +466,7 @@
     <description>
       Whether virtual memory limits will be enforced for containers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -436,6 +475,7 @@
     <description>
       URI for the HistoryServer's log resource
     </description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -447,6 +487,7 @@
       file must be specified.  If the value is empty, no hosts are
       excluded.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -455,6 +496,7 @@
     <description>Indicate to clients whether timeline service is enabled or not.
       If enabled, clients will put entities and events to the timeline server.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -466,6 +508,7 @@
     <description>
       Store class name for history store, defaulting to file system store
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -474,6 +517,7 @@
     <description>
       Store file name for leveldb timeline store
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
@@ -485,6 +529,7 @@
     <description>
       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -493,6 +538,7 @@
     <description>
       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -501,6 +547,7 @@
     <description>
       The http address of the timeline service web application.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -509,6 +556,7 @@
     <description>
       The http address of the timeline service web application.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -518,12 +566,14 @@
       This is default address for the timeline server to start
       the RPC server.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
     <description>Enable age off of timeline store data.</description>
     <value>true</value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -533,6 +583,7 @@
     <description>Time to live for timeline store data in milliseconds.</description>
     <name>yarn.timeline-service.ttl-ms</name>
     <value>2678400000</value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -542,6 +593,7 @@
     <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
     <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
     <value>300000</value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -551,12 +603,14 @@
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
     <description></description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <!-- Default Values Set for IOP Stack -->
@@ -566,6 +620,7 @@
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
     <description>Enable the node manager to recover after starting</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -575,18 +630,21 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
     <description>Time interval between each attempt to connect to NM</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -596,6 +654,7 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -605,6 +664,7 @@
       Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature.
     </description>
     <display-name>Enable Work Preserving Restart</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -619,6 +679,7 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -627,36 +688,42 @@
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda </value>
     <description>ACL's to be used for ZooKeeper znodes.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
     <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
     <description>How often to try connecting to the ResourceManager.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
     <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -667,42 +734,49 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
     <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any value larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries: (t0, n0), (t1, n1), ...; the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
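+    <!-- Worked example (illustrative comment, not in the original file): with "2000, 500",
+         t0=2000 and n0=500, so the first 500 retries each sleep ~2000 ms, i.e. roughly
+         1000 seconds of retry budget before the client gives up. -->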
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
    <description>Enable RM HA or not.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <!-- Isolation -->
@@ -710,6 +784,7 @@
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
    <description>Prerequisite for using CGroups.</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -722,6 +797,7 @@
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>hadoop-yarn</value>
     <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -734,6 +810,7 @@
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
    <description>If true, YARN will automatically mount the CGroup, although the directory must already exist; otherwise, the CGroup should be mounted by the admin.</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -746,6 +823,7 @@
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
     <value>/cgroup</value>
    <description>Path where the CGroups hierarchy is (or should be) mounted.</description>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>yarn-env</type>
@@ -758,6 +836,7 @@
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <!-- Scheduler -->
@@ -766,6 +845,7 @@
     <value>8</value>
    <description>Number of virtual cores available on a NodeManager for containers.</description>
     <display-name>Number of virtual cores</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -784,6 +864,7 @@
     <value>80</value>
     <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
     <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -799,12 +880,14 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
    <description>Indicates if anonymous requests are allowed by the timeline server when using 'simple' authentication.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -816,6 +899,7 @@
      tokens (falling back to Kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -826,6 +910,7 @@
       I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, 
       with blank as the default.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -836,6 +921,7 @@
       I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, 
       with blank as the default.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -846,22 +932,26 @@
       with blank as the default.
     </description>
     <value>0.0.0.0</value>
+    <on-ambari-upgrade add="true"/>
   </property>
   
- <property>
-   <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
-   <value>true</value>
- </property>
  <property>
    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
    <value>true</value>
    <description>The setting that controls whether yarn system metrics is published on the timeline server or not by RM.</description>
+    <on-ambari-upgrade add="true"/>
  </property>
   
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
    <description>Number of worker threads that send the yarn system metrics data.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -869,12 +959,14 @@
      <value>false</value>
     <description>Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete?</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
      <name>hadoop.registry.zk.quorum</name>
      <value>localhost:2181</value>
      <description> List of hostname:port pairs defining the zookeeper quorum binding for the registry </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -884,9 +976,7 @@
       Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
     </description>
     <display-name>Node Labels</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>value-list</type>
       <entries>
@@ -907,12 +997,14 @@
     <name>yarn.node-labels.fs-store.root-dir</name>
     <value>/system/yarn/node-labels</value>
    <description>Filesystem path where node label state is stored.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
     <value>false</value>
     <display-name>Pre-emption</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>value-list</type>
       <entries>
@@ -934,6 +1026,7 @@
     <value>1</value>
    <description>The minimum allocation for every container request at the RM, in terms of virtual CPU cores.</description>
     <display-name>Minimum Container Size (VCores)</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -953,6 +1046,7 @@
     <value>8</value>
    <description>The maximum allocation for every container request at the RM, in terms of virtual CPU cores.</description>
     <display-name>Maximum Container Size (VCores)</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -973,6 +1067,7 @@
     <description>
      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on HTTP), HTTPS_ONLY (service is provided only on HTTPS).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -981,12 +1076,14 @@
     <description>Enable timeline server to recover state after starting. If
       true, then yarn.timeline-service.state-store-class must be specified.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.timeline-service.leveldb-state-store.path</name>
     <value>/hadoop/yarn/timeline</value>
     <description>Store file name for leveldb state store.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
@@ -996,30 +1093,35 @@
     <name>yarn.timeline-service.state-store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
     <description>Store class name for timeline state store.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
     <value>2000, 500</value>
    <description>Retry policy spec for the node labels FileSystem store, in pairs of sleep-time and number-of-retries (same format as yarn.resourcemanager.fs.state-store.retry-policy-spec).</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used.</description>
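+    <!-- Worked example (illustrative comment, not in the original file): on a 500 GB disk,
+         the 90% cap marks the disk unhealthy once free space drops below 50 GB, while a
+         1000 MB floor would allow it down to 1 GB free; the stricter 90% cap takes effect. -->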
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
     <value>-1</value>
    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling interval that can be set is 3600 seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
@@ -1029,24 +1131,28 @@
      This configuration is for debug and test purposes.
      By setting it to true,
      we can bypass the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
- <property>
-   <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
-   <value>10</value>
-   <description></description>
- </property>
 
   <property>
     <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas.</description>
     <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
     <value></value>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
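
Taken together, the yarn-site hunks above make one uniform change: every <property> element gains an <on-ambari-upgrade add="true"/> child, the element this commit adds for XSD compliance. My reading of the add="true" flag (not stated in this diff) is that it marks the property to be re-added during an Ambari upgrade if it is missing. A minimal sketch of the per-property shape these hunks converge on, assembled from the context lines above using one of the properties in this diff:

  <property>
    <name>yarn.resourcemanager.zk-timeout-ms</name>
    <value>10000</value>
    <description>ZooKeeper session timeout in milliseconds.</description>
    <on-ambari-upgrade add="true"/>
    <!-- optional blocks such as <value-attributes> and <depends-on> follow here in these hunks -->
  </property>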

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zoo.cfg.xml
index 58574c7..157b754 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zoo.cfg.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zoo.cfg.xml
@@ -26,6 +26,7 @@
     <display-name>Length of single Tick</display-name>
     <value>2000</value>
     <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <overridable>false</overridable>
@@ -37,6 +38,7 @@
     <display-name>Ticks to allow for sync at Init</display-name>
     <value>10</value>
     <description>Ticks to allow for sync at Init.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <overridable>false</overridable>
@@ -47,6 +49,7 @@
     <display-name>Ticks to allow for sync at Runtime</display-name>
     <value>5</value>
     <description>Ticks to allow for sync at Runtime.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <overridable>false</overridable>
@@ -57,6 +60,7 @@
     <display-name>Port for running ZK Server</display-name>
     <value>2181</value>
     <description>Port for running ZK Server.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <overridable>false</overridable>
@@ -67,6 +71,7 @@
     <display-name>ZooKeeper directory</display-name>
     <value>/hadoop/zookeeper</value>
     <description>Data directory for ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
     </value-attributes>
@@ -75,10 +80,12 @@
     <name>autopurge.snapRetainCount</name>
     <value>30</value>
    <description>The ZooKeeper purge feature retains the autopurge.snapRetainCount most recent snapshots and the corresponding transaction logs in the dataDir and dataLogDir respectively, and deletes the rest.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>autopurge.purgeInterval</name>
     <value>24</value>
    <description>The time interval in hours after which the purge task is triggered. Set to a positive integer (1 and above) to enable auto purging.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
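
For orientation, the timing properties in this file compose multiplicatively: with a tick length of 2000 ms, the 10-tick init limit gives a follower 20,000 ms to sync with the leader at startup, and the 5-tick sync limit gives 10,000 ms at runtime. As a sketch, the first property reads as below after the patch (the tickTime key is an assumption here, since the <name> element sits above the hunk context):

  <property>
    <name>tickTime</name>
    <display-name>Length of single Tick</display-name>
    <value>2000</value>
    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
    <on-ambari-upgrade add="true"/>
    <value-attributes>
      <type>int</type>
      <overridable>false</overridable>
    </value-attributes>
  </property>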

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
index 87cb0fd..332cd4d 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -24,6 +24,7 @@
   <property>
     <name>zk_user</name>
     <value>zookeeper</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>ZooKeeper User.</description>
   </property>
@@ -32,6 +33,7 @@
     <display-name>ZooKeeper Log Dir</display-name>
     <value>/var/log/zookeeper</value>
     <description>ZooKeeper Log Dir</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <editable-only-at-install>true</editable-only-at-install>
@@ -43,6 +45,7 @@
     <display-name>ZooKeeper PID Dir</display-name>
     <value>/var/run/zookeeper</value>
     <description>ZooKeeper Pid Dir</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <editable-only-at-install>true</editable-only-at-install>
@@ -69,5 +72,6 @@ export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_
 export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
 {% endif %}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
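
One placement detail worth noting in the zk_user hunk above: the new element lands between <value> and <property-type>, whereas every other hunk in this commit inserts it after <description>. Assembled from the context lines, the property now reads:

  <property>
    <name>zk_user</name>
    <value>zookeeper</value>
    <on-ambari-upgrade add="true"/>
    <property-type>USER</property-type>
    <description>ZooKeeper User.</description>
  </property>

If the XSD fixes the order of child elements, this is the one property that may still need the description-then-upgrade ordering used elsewhere in the commit.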

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
index 5f1e295..20f572f 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -96,6 +96,7 @@ log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
 ### Notice we are including log4j's NDC here (%x)
 log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
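
The zookeeper-log4j hunk shows the same pattern applied to template-bodied properties: the upgrade tag goes immediately after the closing </value>. A trimmed sketch of the resulting shape (the content name and the elided template body are assumptions; only the closing lines appear in the hunk above):

  <property>
    <name>content</name>
    <value>
...log4j template body...
    </value>
    <on-ambari-upgrade add="true"/>
  </property>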