Posted to commits@ambari.apache.org by al...@apache.org on 2017/02/09 19:27:55 UTC

[1/2] ambari git commit: AMBARI-19831. HDP 3.0 TP - Support changed configs and scripts for YARN/MR (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/trunk 00e18721e -> 43323f997


http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 4f30cb9..dd5e9a4 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -56,7 +56,7 @@
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
     <description>The class to use as the resource scheduler.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-mb</name>
@@ -80,7 +80,7 @@
         <name>yarn.nodemanager.resource.memory-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-mb</name>
@@ -104,7 +104,7 @@
         <name>yarn.nodemanager.resource.memory-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.acl.enable</name>
@@ -116,7 +116,7 @@
         <name>ranger-yarn-plugin-enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.admin.acl</name>
@@ -125,14 +125,14 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- NodeManager -->
   <property>
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.memory-mb</name>
@@ -147,13 +147,13 @@
       <unit>MB</unit>
       <increment-step>256</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.application.classpath</name>
     <value>$HADOOP_CONF_DIR,{{stack_root}}/current/hadoop-client/*,{{stack_root}}/current/hadoop-client/lib/*,{{stack_root}}/current/hadoop-hdfs-client/*,{{stack_root}}/current/hadoop-hdfs-client/lib/*,{{stack_root}}/current/hadoop-yarn-client/*,{{stack_root}}/current/hadoop-yarn-client/lib/*</value>
     <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.vmem-pmem-ratio</name>
@@ -170,7 +170,7 @@
       <maximum>5.0</maximum>
       <increment-step>0.1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
@@ -186,11 +186,12 @@
         <name>user_group</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <!-- TODO HDP 3.0, restore back to "mapreduce_shuffle,spark_shuffle,spark2_shuffle" after Spark is added to HDP 3.0 stack. -->
+    <value>mapreduce_shuffle</value>
     <description>Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
     <on-ambari-upgrade add="false"/>
   </property>
@@ -198,7 +199,7 @@
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
     <description>The auxiliary service class to use </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-dirs</name>
@@ -213,7 +214,7 @@
     <value-attributes>
       <type>directories</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.local-dirs</name>
@@ -228,7 +229,7 @@
     <value-attributes>
       <type>directories</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-monitor.interval-ms</name>
@@ -237,19 +238,19 @@
       The interval, in milliseconds, for which the node manager
       waits  between two cycles of monitoring its containers' memory usage.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.health-checker.interval-ms</name>
     <value>135000</value>
     <description>Frequency of running node health script.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
     <value>60000</value>
     <description>Script time out period.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log.retain-seconds</name>
@@ -258,7 +259,7 @@
       Time in seconds to retain user logs. Only applicable if
       log aggregation is disabled.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.log-aggregation-enable</name>
@@ -268,14 +269,14 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
     <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
@@ -284,7 +285,7 @@
       The remote log dir will be created at
       {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
@@ -292,7 +293,7 @@
     <description>
       T-file compression types used to compress aggregated logs.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.delete.debug-delay-sec</name>
@@ -312,7 +313,7 @@
       of the Yarn applications' log directories is configurable with the
       yarn.nodemanager.log-dirs property (see also below).
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.log-aggregation.retain-seconds</name>
@@ -321,7 +322,7 @@
       How long to keep aggregation logs before deleting them. -1 disables.
       Be careful set this too small and you will spam the name node.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.admin-env</name>
@@ -330,7 +331,7 @@
       Environment variables that should be forwarded from the NodeManager's
       environment to the container's.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
@@ -342,7 +343,7 @@
       If there are less number of healthy local-dirs (or log-dirs) available,
       then new containers will not be launched on this node.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.am.max-attempts</name>
@@ -355,7 +356,7 @@
       the resourcemanager will override it. The default number is set to 2, to
       allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.address</name>
@@ -379,7 +380,7 @@
     <description>
       Whether virtual memory limits will be enforced for containers.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.log.server.url</name>
@@ -398,7 +399,7 @@
       file must be specified.  If the value is empty, no hosts are
       excluded.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.http.policy</name>
@@ -406,7 +407,7 @@
     <description>
       This configures the HTTP endpoint for Yarn Daemons.The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY : Service is provided only on https
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.1 -->
@@ -419,7 +420,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.generic-application-history.store-class</name>
@@ -427,7 +428,7 @@
     <description>
       Store class name for history store, defaulting to file system store
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.webapp.address</name>
@@ -461,7 +462,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
@@ -470,7 +471,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These properties were inherited from HDP 2.2 -->
@@ -480,7 +481,7 @@
     <description>
       Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
@@ -494,7 +495,7 @@
     <name>yarn.nodemanager.recovery.enabled</name>
     <value>true</value>
     <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.recovery.dir</name>
@@ -503,19 +504,19 @@
       The local filesystem directory in which the node manager will store
       state when recovery is enabled.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
     <value>10000</value>
     <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.client.nodemanager-connect.max-wait-ms</name>
     <value>60000</value>
     <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.recovery.enabled</name>
@@ -524,7 +525,7 @@
       Enable RM to recover state after starting.
       If true, then yarn.resourcemanager.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
@@ -536,7 +537,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.store.class</name>
@@ -547,7 +548,7 @@
       the store is implicitly fenced; meaning a single ResourceManager
       is able to use the store at any point in time.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
@@ -561,31 +562,31 @@
     <name>yarn.resourcemanager.zk-state-store.parent-path</name>
     <value>/rmstore</value>
     <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-acl</name>
     <value>world:anyone:rwcda</value>
     <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
     <value>10000</value>
     <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.retry-interval.ms</name>
     <value>30000</value>
     <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.connect.max-wait.ms</name>
     <value>900000</value>
     <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-retry-interval-ms</name>
@@ -595,43 +596,43 @@
       automatically from yarn.resourcemanager.zk-timeout-ms and
       yarn.resourcemanager.zk-num-retries."
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-num-retries</name>
     <value>1000</value>
     <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.zk-timeout-ms</name>
     <value>10000</value>
     <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.state-store.max-completed-applications</name>
     <value>${yarn.resourcemanager.max-completed-applications}</value>
     <description>The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance.Typically,  a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
     <value>2000, 500</value>
     <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.fs.state-store.uri</name>
     <value> </value>
     <description>RI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.ha.enabled</name>
     <value>false</value>
     <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
@@ -643,7 +644,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
@@ -655,7 +656,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
@@ -667,7 +668,7 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
@@ -679,13 +680,13 @@
         <name>yarn_cgroups_enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
     <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.cpu-vcores</name>
@@ -710,7 +711,7 @@
         <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
@@ -723,7 +724,7 @@
       <maximum>100</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.retry-policy-spec</name>
@@ -733,19 +734,19 @@
       specified by N pairs of sleep-time in milliseconds and number-of-retries
       &quot;s1,n1,s2,n2,...&quot;.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
     <value>1000</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
     <value>90</value>
     <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
@@ -760,37 +761,37 @@
       This configuration is for debug and test purpose.
       By setting this configuration as true.
       We can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
     <value>30</value>
     <description>This is temporary solution. The configuration will be deleted once, we find a more scalable method to only write a single log file per LRS.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
     <value>10</value>
     <description>Number of worker threads that send the yarn system metrics data.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.max-retries</name>
     <value>30</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.client.retry-interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.ttl-enable</name>
@@ -801,13 +802,13 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.state-store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
     <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-state-store.path</name>
@@ -816,7 +817,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.path</name>
@@ -825,7 +826,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
@@ -833,7 +834,7 @@
     <description>
       Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
@@ -841,7 +842,7 @@
     <description>
       Size of cache for recently read entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
@@ -849,7 +850,7 @@
     <description>
       Size of cache for recently written entity start times for leveldb timeline store in number of entities.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.type</name>
@@ -858,13 +859,13 @@
       Defines authentication used for the Timeline Server HTTP endpoint.
       Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
     <value>true</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
@@ -875,25 +876,25 @@
       tokens(fallback to kerberos if the tokens are missing).
       Only applicable when the http authentication type is kerberos.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.bind-host</name>
     <value>0.0.0.0</value>
     <description>Default value is 0.0.0.0, when this is set the service will bind on all interfaces.  I think these two options (blank, "0.0.0.0" sans quotes) should be the two available values, with blank as the default.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.fs-store.root-dir</name>
@@ -901,7 +902,7 @@
     <description>
       URI for NodeLabelManager.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.minimum-allocation-vcores</name>
@@ -920,7 +921,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.scheduler.maximum-allocation-vcores</name>
@@ -939,7 +940,7 @@
         <name>yarn.nodemanager.resource.cpu-vcores</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.node-labels.enabled</name>
@@ -962,7 +963,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.nodemanager.container-executor.class</name>
@@ -978,7 +979,7 @@
         <name>hadoop.security.authentication</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.resourcemanager.scheduler.monitor.enable</name>
@@ -1003,7 +1004,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- In HDP 2.3, these properties were deleted:
 yarn.node-labels.manager-class
@@ -1017,7 +1018,7 @@ yarn.node-labels.manager-class
       true, then yarn.timeline-service.state-store-class must be specified.
     </description>
     <value>true</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.authorization-provider</name>
@@ -1028,33 +1029,53 @@ yarn.node-labels.manager-class
         <name>ranger-yarn-plugin-enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
-  <!--ats v1.5 properties-->
+  <!--ats v2.0 properties-->
+
+  <!-- TODO HDP 3.0, set version to 2.0 once ready. -->
   <property>
     <name>yarn.timeline-service.version</name>
     <value>1.5</value>
     <description>Timeline service version we&#x2019;re currently using.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.system-metricspublisher.enabled</name>
+    <value>true</value>
+    <description>Enables YARN to publish metrics to timeline v2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.rm.system-metricspublisher.emit-container-events</name>
+    <value>true</value>
+    <description>Experimentally enable each container to post timeline events to timeline v2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.supervised</name>
+    <value>true</value>
+    <description>Default of false causes NM to kill its containers on shutdown. Should be set to true when NM recovery is enabled.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.store-class</name>
     <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
     <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
     <value>/ats/active/</value>
     <description>DFS path to store active application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
     <value>/ats/done/</value>
     <description>DFS path to store done application&#x2019;s timeline data</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
@@ -1063,7 +1084,7 @@ yarn.node-labels.manager-class
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- advanced ats v1.5 properties-->
   <property>
@@ -1071,7 +1092,7 @@ yarn.node-labels.manager-class
     <description>Summary storage for ATS v1.5</description>
     <!-- Use rolling leveldb, advanced -->
     <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
@@ -1082,7 +1103,7 @@ yarn.node-labels.manager-class
     </description>
     <!-- Default is 60 seconds, advanced -->
     <value>60</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
@@ -1093,7 +1114,7 @@ yarn.node-labels.manager-class
     </description>
     <!-- 3600 is default, advanced -->
     <value>3600</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
@@ -1103,7 +1124,7 @@ yarn.node-labels.manager-class
     </description>
     <!-- 7 days is default, advanced -->
     <value>604800</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.4 -->
@@ -1111,7 +1132,7 @@ yarn.node-labels.manager-class
     <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
     <value>org.apache.spark.network.yarn.YarnShuffleService</value>
     <description>The auxiliary service class to use for Spark</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.5 -->
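
Nearly every hunk above flips <on-ambari-upgrade add="true"/> to add="false", so these yarn-site.xml properties should no longer be injected automatically into existing clusters when Ambari itself is upgraded. As a rough illustration only (the script name and usage are assumptions, and it is not part of this commit), a stack configuration file could be audited for properties still marked add="true" with nothing but the Python standard library:

    # audit_on_ambari_upgrade.py -- hypothetical helper, not shipped with Ambari
    import sys
    import xml.etree.ElementTree as ET

    def properties_added_on_upgrade(path):
        """Return names of <property> entries whose <on-ambari-upgrade> still says add="true"."""
        root = ET.parse(path).getroot()          # root element is <configuration>
        flagged = []
        for prop in root.findall("property"):
            upgrade = prop.find("on-ambari-upgrade")
            if upgrade is not None and upgrade.get("add") == "true":
                flagged.append(prop.findtext("name", default="<unnamed>"))
        return flagged

    if __name__ == "__main__":
        for name in properties_added_on_upgrade(sys.argv[1]):
            print(name)

Run against the updated yarn-site.xml, such a check would be expected to report none of the properties touched above.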

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 14eae07..b5b6d48 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -40,7 +40,7 @@
             <timeout>1200</timeout>
           </commandScript>
 
-          <!-- TODO Alejandro add later after UI is fixed,
+          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
               <name>TEZ/TEZ_CLIENT</name>
@@ -73,7 +73,7 @@
             <timeout>1200</timeout>
           </commandScript>
 
-          <!-- TODO Alejandro add later after UI is fixed,
+          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
               <name>TEZ/TEZ_CLIENT</name>
@@ -268,7 +268,7 @@
             <co-locate>YARN/RESOURCEMANAGER</co-locate>
           </auto-deploy>
 
-          <!-- TODO Alejandro add later after UI is fixed,
+          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
               <name>HDFS/HDFS_CLIENT</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
index c2e9d92..7c1b2c7 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/status_params.py
@@ -46,11 +46,11 @@ else:
   yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
   mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
 
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+  resourcemanager_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-resourcemanager.pid")
+  nodemanager_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-nodemanager.pid")
+  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/hadoop-{yarn_user}-historyserver.pid")
+  yarn_historyserver_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+  mapred_historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
 
   hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
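
The pid file names above move from a yarn-/mapred- prefix to a hadoop- prefix, consistent with the unified daemon scripts in Hadoop 3, which name pid files hadoop-<user>-<daemon>.pid. As a minimal sketch (plain Python, not the resource_management helpers Ambari actually uses; the example path is an assumption), a liveness check driven by such a pid file could look like:

    # check_pid.py -- hypothetical standalone check, not part of this commit
    import os

    def daemon_is_running(pid_file):
        """Return True if the pid recorded in pid_file refers to a live process."""
        try:
            with open(pid_file) as fh:
                pid = int(fh.read().strip())
        except (IOError, ValueError):
            return False                 # missing or malformed pid file
        try:
            os.kill(pid, 0)              # signal 0 only probes for process existence
            return True
        except OSError:
            return False                 # no such process (EPERM is also treated as down in this sketch)

    # e.g. daemon_is_running("/var/run/hadoop-yarn/yarn/hadoop-yarn-resourcemanager.pid")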
 


[2/2] ambari git commit: AMBARI-19831. HDP 3.0 TP - Support changed configs and scripts for YARN/MR (alejandro)

Posted by al...@apache.org.
AMBARI-19831. HDP 3.0 TP - Support changed configs and scripts for YARN/MR (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/43323f99
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/43323f99
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/43323f99

Branch: refs/heads/trunk
Commit: 43323f997b34a0bbb4ee9122734311e0b1205fcb
Parents: 00e1872
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Fri Feb 3 15:47:02 2017 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Thu Feb 9 11:27:49 2017 -0800

----------------------------------------------------------------------
 .../HDFS/3.0.0.3.0/configuration/core-site.xml  |  30 +--
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml |  42 ++--
 .../hadoop-metrics2.properties.xml              |   2 +-
 .../3.0.0.3.0/configuration/hadoop-policy.xml   |  22 +-
 .../HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml |   2 +-
 .../configuration/hdfs-logsearch-conf.xml       |   6 +-
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  | 109 +++++----
 .../HDFS/3.0.0.3.0/configuration/ssl-client.xml |  14 +-
 .../HDFS/3.0.0.3.0/configuration/ssl-server.xml |  16 +-
 .../configuration-mapred/mapred-env.xml         |  14 +-
 .../mapred-logsearch-conf.xml                   |   6 +-
 .../configuration-mapred/mapred-site.xml        |  90 ++++----
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   |  26 +--
 .../YARN/3.0.0.3.0/configuration/yarn-log4j.xml |   2 +-
 .../configuration/yarn-logsearch-conf.xml       |   6 +-
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  | 225 ++++++++++---------
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |   6 +-
 .../3.0.0.3.0/package/scripts/status_params.py  |  10 +-
 18 files changed, 324 insertions(+), 304 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
index 20b1930..f323faa 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/core-site.xml
@@ -22,7 +22,7 @@
     <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
     <value>120</value>
     <description>ZooKeeper Failover Controller retries setting for your environment</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- i/o properties -->
   <property>
@@ -32,21 +32,21 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
     <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- file system properties -->
   <property>
@@ -67,7 +67,7 @@
         If trash is disabled server side then the client side configuration is checked.
         If trash is enabled on the server side then the value configured on the server is used and the client configuration value is ignored.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- ipc properties: copied from kryptonite configuration -->
   <property>
@@ -76,7 +76,7 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ipc.client.connection.maxidletime</name>
@@ -84,13 +84,13 @@
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ipc.server.tcpnodelay</name>
@@ -101,7 +101,7 @@
       decrease latency
       with a cost of more/smaller packets.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- Web Interface Configuration -->
   <property>
@@ -112,7 +112,7 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.security.authentication</name>
@@ -121,7 +121,7 @@
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.security.authorization</name>
@@ -129,7 +129,7 @@
     <description>
      Enable authorization for different protocols.
   </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.security.auth_to_local</name>
@@ -175,7 +175,7 @@ DEFAULT
     <value-attributes>
       <type>multiLine</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>net.topology.script.file.name</name>
@@ -183,7 +183,7 @@ DEFAULT
     <description>
       Location of topology script used by Hadoop to determine the rack location of nodes.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.2 -->
@@ -193,7 +193,7 @@ DEFAULT
     <description>
       Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop.security.key.provider.path</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
index 3e7388e..d493fe6 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
@@ -29,7 +29,7 @@
       <type>directory</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop_pid_dir_prefix</name>
@@ -41,7 +41,7 @@
       <overridable>false</overridable>
       <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop_root_logger</name>
@@ -51,7 +51,7 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
@@ -63,7 +63,7 @@
       <unit>MB</unit>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
@@ -84,7 +84,7 @@
         <name>dfs.datanode.data.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_opt_newsize</name>
@@ -105,7 +105,7 @@
       <increment-step>256</increment-step>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_opt_maxnewsize</name>
@@ -126,7 +126,7 @@
       <increment-step>256</increment-step>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_opt_permsize</name>
@@ -141,7 +141,7 @@
       <increment-step>128</increment-step>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_opt_maxpermsize</name>
@@ -156,7 +156,7 @@
       <increment-step>128</increment-step>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dtnode_heapsize</name>
@@ -170,7 +170,7 @@
       <unit>MB</unit>
       <increment-step>128</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>proxyuser_group</name>
@@ -182,7 +182,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_user</name>
@@ -194,7 +194,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_tmp_dir</name>
@@ -207,19 +207,19 @@
       <overridable>false</overridable>
       <visible>false</visible>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_user_nofile_limit</name>
     <value>128000</value>
     <description>Max open files limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_user_nproc_limit</name>
     <value>65536</value>
     <description>Max number of processes limit setting for HDFS user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>namenode_backup_dir</name>
@@ -230,13 +230,13 @@
   <property>
     <name>hdfs_user_keytab</name>
     <description>HDFS keytab path</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hdfs_principal_name</name>
     <description>HDFS principal name</description>
     <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.2 -->
@@ -248,7 +248,7 @@
     <value-attributes>
       <type>string</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>keyserver_port</name>
@@ -259,7 +259,7 @@
       <type>int</type>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.3 -->
@@ -410,7 +410,7 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>nfsgateway_heapsize</name>
@@ -421,6 +421,6 @@
       <type>int</type>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
index 6b45e84..4aadb83 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
@@ -120,6 +120,6 @@ namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
index 8e9486d..9193bad 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-policy.xml
@@ -26,7 +26,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.client.datanode.protocol.acl</name>
@@ -36,7 +36,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.datanode.protocol.acl</name>
@@ -46,7 +46,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.inter.datanode.protocol.acl</name>
@@ -56,7 +56,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.namenode.protocol.acl</name>
@@ -66,7 +66,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.inter.tracker.protocol.acl</name>
@@ -76,7 +76,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.job.client.protocol.acl</name>
@@ -86,7 +86,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.job.task.protocol.acl</name>
@@ -96,7 +96,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.admin.operations.protocol.acl</name>
@@ -105,7 +105,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
@@ -115,7 +115,7 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>security.refresh.policy.protocol.acl</name>
@@ -125,6 +125,6 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
index 35554e6..448f224 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
@@ -221,6 +221,6 @@
       <type>content</type>
       <show-property-name>false</show-property-name>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
index d85a028..6540c86 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-logsearch-conf.xml
@@ -25,14 +25,14 @@
     <display-name>Service name</display-name>
     <description>Service name for Logsearch Portal (label)</description>
     <value>HDFS</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>component_mappings</name>
     <display-name>Component mapping</display-name>
     <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
     <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>content</name>
@@ -243,6 +243,6 @@
       <type>content</type>
       <show-property-name>false</show-property-name>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
index 60fde60..dbeaaaf 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-site.xml
@@ -33,16 +33,15 @@
       <type>directories</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
-  <!--
-  This property is deleted in Hadoop 3.0. Need to remove it during Stack Upgrade.
+  <!-- TODO HDP 3.0, this property is deleted in Hadoop 3.0. Need to remove it during Stack Upgrade.
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
     <deleted>true</deleted>
   </property>
   -->
@@ -56,7 +55,7 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
@@ -76,7 +75,7 @@
         <name>dfs.datanode.data.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.data.dir</name>
@@ -92,7 +91,7 @@
     <value-attributes>
       <type>directories</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.hosts.exclude</name>
@@ -101,7 +100,7 @@
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
       excluded.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!--
     <property>
@@ -126,7 +125,7 @@
       <type>directories</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.edits.dir</name>
@@ -137,7 +136,7 @@
       replicated in all of the directories for redundancy.
       Default value is same as dfs.namenode.checkpoint.dir
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.period</name>
@@ -148,7 +147,7 @@
       <type>int</type>
       <unit>seconds</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.txns</name>
@@ -157,14 +156,14 @@
       of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
       regardless of whether 'dfs.namenode.checkpoint.period' has expired.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.replication</name>
@@ -175,13 +174,13 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.safemode.threshold-pct</name>
@@ -199,7 +198,7 @@
       <maximum>1.000</maximum>
       <increment-step>0.001</increment-step>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
@@ -209,7 +208,7 @@
       can utilize for the balancing purpose in term of
       the number of bytes per second.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.https.port</name>
@@ -217,7 +216,7 @@
     <description>
       This property is used by HftpFileSystem.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.address</name>
@@ -225,7 +224,7 @@
     <description>
       The datanode server address and port for data transfer.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.http.address</name>
@@ -233,7 +232,7 @@
     <description>
       The datanode http server address and port.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.https.address</name>
@@ -241,13 +240,13 @@
     <description>
       The datanode https server address and port.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.blocksize</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.http-address</name>
@@ -280,7 +279,7 @@
         <name>dfs.datanode.data.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.ipc.address</name>
@@ -289,13 +288,13 @@
       The datanode ipc server address and port.
       If the port is 0 then the server will start on a free port.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.blockreport.initialDelay</name>
     <value>120</value>
     <description>Delay for first block report in seconds.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.datanode.max.transfer.threads</name>
@@ -307,7 +306,7 @@
       <minimum>0</minimum>
       <maximum>48000</maximum>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- Permissions configuration -->
   <property>
@@ -316,7 +315,7 @@
     <description>
       The octal umask used when creating files and directories.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.permissions.enabled</name>
@@ -328,13 +327,13 @@
       Switching from one parameter value to the other does not change the mode,
       owner or group of files or directories.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.permissions.superusergroup</name>
     <value>hdfs</value>
     <description>The name of the group of super-users.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.handler.count</name>
@@ -346,7 +345,7 @@
       <minimum>1</minimum>
       <maximum>200</maximum>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.block.access.token.enable</name>
@@ -355,7 +354,7 @@
       If "true", access tokens are used as capabilities for accessing datanodes.
       If "false", no access tokens are checked on accessing datanodes.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <!-- cluster variant -->
@@ -381,7 +380,7 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.accesstime.precision</name>
@@ -394,13 +393,13 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
     <description>ACL for who all can view the default servlets in the HDFS</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
@@ -410,7 +409,7 @@
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.write.stale.datanode</name>
@@ -420,7 +419,7 @@
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.write.stale.datanode.ratio</name>
@@ -428,33 +427,33 @@
     <description>When the ratio of number stale datanodes to total datanodes marked is greater
       than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.stale.datanode.interval</name>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
       If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.journalnode.https-address</name>
     <value>0.0.0.0:8481</value>
     <description>The address and port the JournalNode HTTPS server listens on.
       If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/hadoop/hdfs/journalnode</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- HDFS Short-Circuit Local Reads -->
   <property>
@@ -467,7 +466,7 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.domain.socket.path</name>
@@ -476,7 +475,7 @@
       This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
       If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.client.read.shortcircuit.streams.cache.size</name>
@@ -487,14 +486,14 @@
       more file descriptors, but potentially provide better performance on
       workloads involving lots of seeks.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.name.dir.restore</name>
     <value>true</value>
     <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
       When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.http.policy</name>
@@ -504,7 +503,7 @@
       The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY :
       Service is provided only on https - HTTP_AND_HTTPS : Service is provided both on http and https
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.1 -->
@@ -512,13 +511,13 @@
     <name>dfs.namenode.audit.log.async</name>
     <value>true</value>
     <description>Whether to enable async auditlog</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.fslock.fair</name>
     <value>false</value>
     <description>Whether fsLock is fair</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.2 -->
@@ -533,19 +532,19 @@
       to notice large number of pending deletion blocks and take corrective
       action.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.client.retry.policy.enabled</name>
     <value>false</value>
     <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.content-summary.limit</name>
     <value>5000</value>
     <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.encryption.key.provider.uri</name>
@@ -592,7 +591,7 @@
     <value-attributes>
       <type>directory</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>nfs.exports.allowed.hosts</name>
@@ -606,7 +605,7 @@
       by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
     </description>
     <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.encrypt.data.transfer.cipher.suites</name>
@@ -617,7 +616,7 @@
       If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm
       is used. By default, the property is not defined.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>dfs.namenode.inode.attributes.provider.class</name>
@@ -631,6 +630,6 @@
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
index 6ec064a..7e0f265 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-client.xml
@@ -21,13 +21,13 @@
     <name>ssl.client.truststore.location</name>
     <value>/etc/security/clientKeys/all.jks</value>
     <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.password</name>
@@ -37,25 +37,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.truststore.reload.interval</name>
     <value>10000</value>
     <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.location</name>
     <value>/etc/security/clientKeys/keystore.jks</value>
     <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.client.keystore.password</name>
@@ -65,6 +65,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
index 5d2745f..2177cef 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/ssl-server.xml
@@ -21,13 +21,13 @@
     <name>ssl.server.truststore.location</name>
     <value>/etc/security/serverKeys/all.jks</value>
     <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.password</name>
@@ -37,25 +37,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.truststore.reload.interval</name>
     <value>10000</value>
     <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.type</name>
     <value>jks</value>
     <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.location</name>
     <value>/etc/security/serverKeys/keystore.jks</value>
     <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.password</name>
@@ -65,7 +65,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>ssl.server.keystore.keypassword</name>
@@ -75,6 +75,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
index 2ac0bff..07cfafe 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-env.xml
@@ -29,7 +29,7 @@
       <type>directory</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
@@ -41,7 +41,7 @@
       <overridable>false</overridable>
       <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapred_user</name>
@@ -53,7 +53,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>jobhistory_heapsize</name>
@@ -64,19 +64,19 @@
       <unit>MB</unit>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapred_user_nofile_limit</name>
     <value>32768</value>
     <description>Max open files limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapred_user_nproc_limit</name>
     <value>65536</value>
     <description>Max number of processes limit setting for MAPREDUCE user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>content</name>
@@ -99,6 +99,6 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
index 3c0abbf..35970cd 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-logsearch-conf.xml
@@ -25,14 +25,14 @@
     <display-name>Service name</display-name>
     <description>Service name for Logsearch Portal (label)</description>
     <value>MapReduce</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>component_mappings</name>
     <display-name>Component mapping</display-name>
     <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
     <value>HISTORYSERVER:mapred_historyserver</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>content</name>
@@ -75,6 +75,6 @@
       <type>content</type>
       <show-property-name>false</show-property-name>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
index e51107a..705763f 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration-mapred/mapred-site.xml
@@ -40,7 +40,7 @@
         <name>mapreduce.map.memory.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.map.sort.spill.percent</name>
@@ -52,7 +52,7 @@
       is already in progress, so spills may be larger than this threshold when
       it is set to less than .5
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.task.io.sort.factor</name>
@@ -61,7 +61,7 @@
       The number of streams to merge at once while sorting files.
       This determines the number of open file handles.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- map/reduce properties -->
   <property>
@@ -70,7 +70,7 @@
     <description>
       Administrators for MapReduce applications.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.parallelcopies</name>
@@ -79,7 +79,7 @@
       The default number of parallel transfers run by reduce during
       the copy(shuffle) phase.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.map.speculative</name>
@@ -88,7 +88,7 @@
       If true, then multiple instances of some map tasks
       may be executed in parallel.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.speculative</name>
@@ -97,7 +97,7 @@
       If true, then multiple instances of some reduce tasks may be
       executed in parallel.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.job.reduce.slowstart.completedmaps</name>
@@ -106,7 +106,7 @@
       Fraction of the number of maps in the job which should be complete before
       reduces are scheduled for the job.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.job.counters.max</name>
@@ -114,7 +114,7 @@
     <description>
       Limit on the number of counters allowed per job.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.merge.percent</name>
@@ -125,7 +125,7 @@
       storing in-memory map outputs, as defined by
       mapreduce.reduce.shuffle.input.buffer.percent.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
@@ -134,7 +134,7 @@
       The percentage of memory to be allocated from the maximum heap
       size to storing map outputs during the shuffle.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
@@ -143,7 +143,7 @@
       If the job outputs are to compressed as SequenceFiles, how should
       they be compressed? Should be one of NONE, RECORD or BLOCK.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.input.buffer.percent</name>
@@ -154,7 +154,7 @@
       remaining map outputs in memory must consume less than this threshold before
       the reduce can begin.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <!-- copied from kryptonite configuration -->
   <property>
@@ -163,7 +163,7 @@
     <description>
       Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.task.timeout</name>
@@ -173,7 +173,7 @@
       terminated if it neither reads an input, writes an output, nor
       updates its status string.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.map.memory.mb</name>
@@ -197,7 +197,7 @@
         <name>yarn.scheduler.minimum-allocation-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.memory.mb</name>
@@ -221,7 +221,7 @@
         <name>yarn.scheduler.minimum-allocation-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.shuffle.port</name>
@@ -231,7 +231,7 @@
       ShuffleHandler is a service run at the NodeManager to facilitate
       transfers of intermediate Map outputs to requesting Reducers.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
@@ -239,7 +239,7 @@
     <description>
       Directory where history files are written by MapReduce jobs.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
@@ -248,7 +248,7 @@
       Directory where history files are managed by the MR JobHistory Server.
     </description>
     <property-type>NOT_MANAGED_HDFS_PATH</property-type>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.address</name>
@@ -269,7 +269,7 @@
       The runtime framework for executing MapReduce jobs. Can be one of local,
       classic or yarn.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
@@ -277,7 +277,7 @@
     <description>
       The staging dir used while submitting jobs.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.resource.mb</name>
@@ -301,7 +301,7 @@
         <name>yarn.scheduler.minimum-allocation-mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.command-opts</name>
@@ -326,7 +326,7 @@
         <name>yarn.app.mapreduce.am.resource.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.admin-command-opts</name>
@@ -348,25 +348,25 @@
         <name>yarn.app.mapreduce.am.resource.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.app.mapreduce.am.log.level</name>
     <value>INFO</value>
     <description>MR App Master process log level.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
     <description>This property stores Java options for map tasks.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
     <description>This property stores Java options for reduce tasks.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.application.classpath</name>
@@ -375,7 +375,7 @@
       CLASSPATH for MR applications. A comma-separated list of CLASSPATH
       entries.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.am.max-attempts</name>
@@ -386,7 +386,7 @@
       set by resourcemanager. Otherwise, it will be override. The default number is
       set to 2, to allow at least one retry for AM.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.map.java.opts</name>
@@ -401,7 +401,7 @@
         <name>mapreduce.map.memory.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.java.opts</name>
@@ -416,7 +416,7 @@
         <name>mapreduce.reduce.memory.mb</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.map.log.level</name>
@@ -425,7 +425,7 @@
       The logging level for the map task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.log.level</name>
@@ -434,7 +434,7 @@
       The logging level for the reduce task. The allowed levels are:
       OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.admin.user.env</name>
@@ -444,7 +444,7 @@
       This is not an additive property. You must preserve the original value if
       you want your map and reduce tasks to have access to native libraries (compression, etc)
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.output.fileoutputformat.compress</name>
@@ -452,7 +452,7 @@
     <description>
       Should the job outputs be compressed?
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.http.policy</name>
@@ -462,7 +462,7 @@
       The following values are supported: - HTTP_ONLY : Service is provided only
       on http - HTTPS_ONLY : Service is provided only on https
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.job.queuename</name>
@@ -484,31 +484,31 @@
     <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
     <value>1</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
     <value>1000</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
     <value>30000</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.job.emit-timeline-data</name>
     <value>false</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.bind-host</name>
     <value>0.0.0.0</value>
     <description/>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These configs were inherited from HDP 2.3 -->
@@ -519,7 +519,7 @@
       server state upon startup.  If enabled then
       mapreduce.jobhistory.recovery.store.class must be specified.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.recovery.store.class</name>
@@ -527,7 +527,7 @@
     <description>The HistoryServerStateStoreService class to store history server
       state for recovery.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
@@ -535,6 +535,6 @@
     <description>The URI where history server state will be stored if HistoryServerLeveldbSystemStateStoreService
       is configured as the recovery storage class.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
index 3bf5bcb..5fb4732 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
@@ -29,7 +29,7 @@
       <type>directory</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn_pid_dir_prefix</name>
@@ -41,7 +41,7 @@
       <overridable>false</overridable>
       <editable-only-at-install>true</editable-only-at-install>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn_user</name>
@@ -53,7 +53,7 @@
       <type>user</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn_heapsize</name>
@@ -64,7 +64,7 @@
       <type>int</type>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>resourcemanager_heapsize</name>
@@ -76,7 +76,7 @@
       <overridable>false</overridable>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>nodemanager_heapsize</name>
@@ -87,7 +87,7 @@
       <type>int</type>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>min_user_id</name>
@@ -97,25 +97,25 @@
     <value-attributes>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>is_supported_yarn_ranger</name>
     <value>true</value>
     <description>Set to false by default,  needs to be set to true in stacks that use Ranger Yarn Plugin</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn_user_nofile_limit</name>
     <value>32768</value>
     <description>Max open files limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn_user_nproc_limit</name>
     <value>65536</value>
     <description>Max number of processes limit setting for YARN user.</description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These properties were inherited from HDP 2.1 -->
@@ -129,7 +129,7 @@
       <unit>MB</unit>
       <type>int</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- These properties were inherited from HDP 2.2 -->
@@ -152,7 +152,7 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <!-- yarn-env.sh -->
@@ -290,7 +290,7 @@
     <value-attributes>
       <type>content</type>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>service_check.queue.name</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
index 8d205f8..a200e74 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
@@ -98,6 +98,6 @@
       <type>content</type>
       <show-property-name>false</show-property-name>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43323f99/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
index 95cf0c9..a02e3d3 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-logsearch-conf.xml
@@ -25,14 +25,14 @@
     <display-name>Service name</display-name>
     <description>Service name for Logsearch Portal (label)</description>
     <value>YARN</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>component_mappings</name>
     <display-name>Component mapping</display-name>
     <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
     <value>RESOURCEMANAGER:yarn_resourcemanager,yarn_historyserver,yarn_jobsummary;NODEMANAGER:yarn_nodemanager;APP_TIMELINE_SERVER:yarn_timelineserver</value>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>content</name>
@@ -99,6 +99,6 @@
       <type>content</type>
       <show-property-name>false</show-property-name>
     </value-attributes>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
 </configuration>
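
Every hunk in this patch makes the same one-line change: the on-ambari-upgrade flag on each property is flipped from add="true" to add="false", the intent being that Ambari no longer injects these HDP 3.0 stack properties into existing cluster configurations during an Ambari upgrade. A minimal Python sketch for double-checking that no add="true" flags remain in the 3.0.0.3.0 service definitions is shown below; it is not part of this commit, and the directory path is assumed from the diff headers above (relative to an ambari source checkout).

#!/usr/bin/env python
# Illustrative sketch (not part of this commit): walk the common-services
# configuration XMLs and report any <on-ambari-upgrade> element that still
# carries add="true". Path assumed from the diff headers in this patch.
import os
import xml.etree.ElementTree as ET

STACK_CONF_ROOT = "ambari-server/src/main/resources/common-services"

def properties_still_added_on_upgrade(xml_path):
    """Return names of <property> entries whose on-ambari-upgrade flag is add="true"."""
    flagged = []
    root = ET.parse(xml_path).getroot()
    for prop in root.findall("property"):
        upgrade = prop.find("on-ambari-upgrade")
        if upgrade is not None and upgrade.get("add") == "true":
            flagged.append(prop.findtext("name", default="<unnamed>"))
    return flagged

if __name__ == "__main__":
    for dirpath, _dirnames, filenames in os.walk(STACK_CONF_ROOT):
        # Only the HDP 3.0 (3.0.0.3.0) configuration directories are of interest here,
        # including configuration-mapred for the MapReduce2 configs.
        if "3.0.0.3.0" not in dirpath or "configuration" not in dirpath:
            continue
        for filename in filenames:
            if not filename.endswith(".xml"):
                continue
            path = os.path.join(dirpath, filename)
            for prop_name in properties_still_added_on_upgrade(path):
                print('%s: %s still has on-ambari-upgrade add="true"' % (path, prop_name))

Run from the root of the ambari repository; after applying this patch the script should print nothing for the files touched above.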