Posted to commits@ambari.apache.org by ad...@apache.org on 2017/07/11 15:25:44 UTC

[09/10] ambari git commit: AMBARI-21431. Update BigInsight configuration files to be compliant with XSD
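
For readers skimming the hunks below, the change is mechanical: the stack
configuration XSD requires each <property> element to declare its behavior
during an Ambari upgrade via an <on-ambari-upgrade> child, and these
BigInsights files predate that requirement. A minimal sketch of the compliant
shape (hypothetical property name, for illustration only):

    <property>
      <name>example.setting</name>
      <value>42</value>
      <description>Illustrative only; not part of this commit.</description>
      <!-- Required by the configuration XSD: declares whether Ambari may
           add this property to existing configs during an upgrade. -->
      <on-ambari-upgrade add="true"/>
    </property>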

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
index f833896..34836ce 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
@@ -31,6 +31,7 @@
   The size of this buffer should probably be a multiple of hardware
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -38,6 +39,7 @@
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
     <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -45,6 +47,7 @@
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
     <description>A list of the compression codec classes that can be used
                  for compression/decompression.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 <!-- file system properties -->
@@ -56,6 +59,7 @@
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -64,6 +68,7 @@
     <description>Number of minutes between trash checkpoints.
   If zero, the trash feature is disabled.
   </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- ipc properties: copied from kryptonite configuration -->
@@ -73,6 +78,7 @@
     <description>Defines the threshold number of connections after which
                connections will be inspected for idleness.
   </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -81,12 +87,14 @@
     <description>The maximum time after which a client will bring down the
                connection to the server.
   </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ipc.client.connect.max.retries</name>
     <value>50</value>
     <description>Defines the maximum number of retries for IPC connections.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -98,6 +106,7 @@
       decrease latency
       with a cost of more/smaller packets.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- Web Interface Configuration -->
@@ -109,6 +118,7 @@
                 not be exposed to public. Enable this option if the interfaces
                 are only reachable by those who have the right authorization.
   </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
  <property>
@@ -118,6 +128,7 @@
    Set the authentication for the cluster. Valid values are: simple or
    kerberos.
    </description>
+    <on-ambari-upgrade add="true"/>
  </property>
 <property>
   <name>hadoop.security.authorization</name>
@@ -125,6 +136,7 @@
   <description>
      Enable authorization for different protocols.
   </description>
+    <on-ambari-upgrade add="true"/>
 </property>
 
   <property>
@@ -175,6 +187,7 @@ If you want to treat all principals from APACHE.ORG with /admin as "admin", your
 RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
 DEFAULT
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>multiLine</type>
     </value-attributes>
@@ -185,5 +198,6 @@ DEFAULT
     <description>
       Location of topology script used by Hadoop to determine the rack location of nodes.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
index ba575b0..c79ac73 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
@@ -26,6 +26,7 @@
     <display-name>Hadoop Log Dir Prefix</display-name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -37,6 +38,7 @@
     <display-name>Hadoop PID Dir Prefix</display-name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -48,6 +50,7 @@
     <value>INFO,RFA</value>
     <description>Hadoop Root Logger</description>
     <display-name>Hadoop Root Logger</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
@@ -57,6 +60,7 @@
     <display-name>Hadoop maximum Java heap size</display-name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <unit>MB</unit>
@@ -68,6 +72,7 @@
     <value>1024</value>
     <description>NameNode Java heap size</description>
     <display-name>NameNode Java heap size</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -81,6 +86,7 @@
     <value>200</value>
     <description>NameNode new generation size</description>
     <display-name>NameNode new generation size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>hadoop-env</type>
@@ -101,6 +107,7 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
     <display-name>NameNode maximum new generation size</display-name>
+    <on-ambari-upgrade add="true"/>
     <depends-on>
       <property>
         <type>hadoop-env</type>
@@ -121,6 +128,7 @@
     <value>128</value>
     <description>NameNode permanent generation size</description>
     <display-name>NameNode permanent generation size</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -135,6 +143,7 @@
     <value>256</value>
     <description>NameNode maximum permanent generation size</description>
     <display-name>NameNode maximum permanent generation size</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -149,6 +158,7 @@
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
     <display-name>DataNode maximum Java heap size</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -160,12 +170,14 @@
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>GROUP</property-type>
     <description>Proxy user group.</description>
   </property>
   <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>User to run HDFS as</description>
   </property>
@@ -178,6 +190,7 @@
       if a path was previously mounted on a drive.
     </description>
     <display-name>File that stores mount point for each data dir</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <visible>true</visible>
@@ -303,6 +316,7 @@ export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib
 #Hadoop logging options. Modify and uncomment to change logging level
 #export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
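
Note the env-style pattern above: hadoop-env keeps its whole shell template in
a single content-style property, so the new element lands immediately after
the closing </value>. The same shape recurs in the hdfs-log4j, hcat-env,
hive-env, and hive-log4j hunks below. A trimmed sketch, with the property name
and description assumed here and the template lines copied from the hunk
above:

    <property>
      <name>content</name>
      <description>hadoop-env.sh template</description>
      <value>
    #Hadoop logging options. Modify and uncomment to change logging level
    #export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
      </value>
      <on-ambari-upgrade add="true"/>
    </property>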

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
index 41bde16..04ccddd 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
@@ -29,6 +29,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -39,6 +40,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -49,6 +51,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -59,6 +62,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -69,6 +73,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -79,6 +84,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -89,6 +95,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -99,6 +106,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
  <property>
@@ -108,6 +116,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -118,6 +127,7 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 <property>
@@ -128,6 +138,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
index 08822eb..d874772 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -196,6 +196,7 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 # Removes "deprecated" messages
 log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
index fc510fa..f5b1255 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
@@ -34,6 +34,7 @@
       directories, for redundancy. </description>
     <display-name>NameNode directories</display-name>
     <final>true</final>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directories</type>
       <overridable>false</overridable>
@@ -45,6 +46,7 @@
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -53,6 +55,7 @@
     <display-name>WebHDFS enabled</display-name>
     <description>Whether to enable WebHDFS feature</description>
     <final>true</final>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
       <overridable>false</overridable>
@@ -65,6 +68,7 @@
     <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
     <final>true</final>
     <display-name>DataNode failed disk tolerance</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -90,6 +94,7 @@
       Directories that do not exist are ignored.
     </description>
     <final>true</final>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directories</type>
     </value-attributes>
@@ -102,6 +107,7 @@
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
       excluded.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!--
@@ -112,6 +118,7 @@
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
       permitted.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
   -->
 
@@ -124,6 +131,7 @@
       If this is a comma-delimited list of directories then the image is
       replicated in all of the directories for redundancy.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directories</type>
       <overridable>false</overridable>
@@ -139,6 +147,7 @@
      replicated in all of the directories for redundancy.
       Default value is same as dfs.namenode.checkpoint.dir
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 
@@ -147,6 +156,7 @@
     <value>21600</value>
     <display-name>HDFS Maximum Checkpoint Delay</display-name>
     <description>The number of seconds between two periodic checkpoints.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <unit>seconds</unit>
@@ -160,6 +170,7 @@
       of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
       regardless of whether 'dfs.namenode.checkpoint.period' has expired.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -167,6 +178,7 @@
     <value>50</value>
     <description>Maximal block replication.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -174,6 +186,7 @@
     <value>3</value>
     <description>Default block replication.</description>
     <display-name>Block replication</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -183,12 +196,14 @@
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -201,6 +216,7 @@
       Values greater than 1 will make safe mode permanent.
     </description>
     <display-name>Minimum replicated blocks %</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>float</type>
       <minimum>0.990</minimum>
@@ -217,6 +233,7 @@
      can utilize for the balancing purpose in terms of
       the number of bytes per second.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -225,6 +242,7 @@
     <description>
       This property is used by HftpFileSystem.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -233,6 +251,7 @@
     <description>
       The datanode server address and port for data transfer.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -241,6 +260,7 @@
     <description>
       The datanode http server address and port.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -249,12 +269,14 @@
     <description>
       The datanode https server address and port.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.blocksize</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -263,6 +285,7 @@
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -271,6 +294,7 @@
     <value>1073741824</value>
     <display-name>Reserved space for HDFS</display-name>
     <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <unit>bytes</unit>
@@ -284,12 +308,14 @@
       The datanode ipc server address and port.
       If the port is 0 then the server will start on a free port.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.blockreport.initialDelay</name>
     <value>120</value>
     <description>Delay for first block report in seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -297,6 +323,7 @@
     <value>8192</value>
     <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
     <display-name>DataNode max data transfer threads</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>0</minimum>
@@ -312,6 +339,7 @@
     <description>
       The octal umask used when creating files and directories.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -324,12 +352,14 @@
       Switching from one parameter value to the other does not change the mode,
       owner or group of files or directories.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.permissions.superusergroup</name>
     <value>hdfs</value>
     <description>The name of the group of super-users.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -337,6 +367,7 @@
     <value>64</value>
     <description>Added to grow Queue size so that more client connections are allowed</description>
     <display-name>NameNode Server threads</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
       <minimum>1</minimum>
@@ -351,6 +382,7 @@
       If "true", access tokens are used as capabilities for accessing datanodes.
       If "false", no access tokens are checked on accessing datanodes.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -359,6 +391,7 @@
     <description>
       Kerberos principal name for the NameNode
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -367,6 +400,7 @@
     <description>
       Kerberos principal name for the secondary NameNode.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 
@@ -378,6 +412,7 @@
     <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -385,6 +420,7 @@
     <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
 
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -392,6 +428,7 @@
     <name>dfs.namenode.secondary.http-address</name>
     <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -402,6 +439,7 @@
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPNEGO specification.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -411,6 +449,7 @@
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -419,6 +458,7 @@
     <description>
       The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -427,6 +467,7 @@
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -435,6 +476,7 @@
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -443,13 +485,14 @@
     <description>
       The filename of the keytab file for the DataNode.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.namenode.https-address</name>
     <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
-
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -460,6 +503,7 @@
       directories. The datanode will not come up if the permissions are
       different on existing dfs.datanode.data.dir directories. If the directories
       don't exist, they will be created with this permission.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -473,6 +517,7 @@
       The default value is 1 hour. Setting a value of 0 disables
       access times for HDFS.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>int</type>
     </value-attributes>
@@ -482,6 +527,7 @@
     <name>dfs.cluster.administrators</name>
     <value> hdfs</value>
     <description>ACL for who all can view the default servlets in the HDFS</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <visible>true</visible>
     </value-attributes>
@@ -495,6 +541,7 @@
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.avoid.write.stale.datanode</name>
@@ -504,6 +551,7 @@
       heartbeat messages have not been received by the namenode for more than a
       specified time interval.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.write.stale.datanode.ratio</name>
@@ -511,11 +559,13 @@
    <description>When the ratio of datanodes marked stale to total datanodes is greater
      than this value, stop avoiding writes to stale nodes so as to prevent causing hotspots.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>dfs.namenode.stale.datanode.interval</name>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -523,12 +573,14 @@
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
       If the port is 0 then the server will start on a free port. </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>
     <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- HDFS Short-Circuit Local Reads -->
@@ -538,6 +590,7 @@
     <value>true</value>
     <description>This configuration parameter turns on short-circuit local reads.</description>
     <display-name>HDFS Short-circuit read</display-name>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -550,6 +603,7 @@
       This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
       If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -561,6 +615,7 @@
       more file descriptors, but potentially provide better performance on
       workloads involving lots of seeks.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -568,39 +623,47 @@
     <value>true</value>
     <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
       When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.datanode.handler.count</name>
     <value>40</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.namenode.acls.enabled</name>
     <value>true</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.client.file-block-storage-locations.timeout.millis</name>
     <value>3000</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.client.mmap.enabled</name>
     <value>true</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.datanode.max.locked.memory</name>
     <value>0</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
     <value>true</value>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
      <name>dfs.http.policy</name>
      <value>HTTP_ONLY</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
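
The add attribute is not uniformly "true" above: endpoint-style properties
whose values are cluster-specific (the default-filesystem property,
dfs.namenode.secondary.http-address, dfs.namenode.https-address) get
add="false". The reading suggested by that split: "true" lets an Ambari
upgrade add the stock definition to existing configs when it is missing,
while "false" keeps an upgrade from introducing a placeholder such as
localhost:50470 into a live cluster. A contrasting sketch using two
properties from the hunks above (descriptions omitted for brevity):

    <!-- generic tunable: safe for an upgrade to add -->
    <property>
      <name>dfs.blocksize</name>
      <value>134217728</value>
      <on-ambari-upgrade add="true"/>
    </property>

    <!-- host-bound endpoint: left alone on upgrade -->
    <property>
      <name>dfs.namenode.https-address</name>
      <value>localhost:50470</value>
      <on-ambari-upgrade add="false"/>
    </property>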

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
index 4513fdd..01d8108 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
@@ -22,15 +22,18 @@
         <name>ssl.client.truststore.location</name>
         <value>/etc/security/clientKeys/all.jks</value>
         <description>Location of the trust store file.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.client.truststore.type</name>
         <value>jks</value>
         <description>Optional. Default value is "jks".</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.client.truststore.password</name>
         <value>bigdata</value>
+        <on-ambari-upgrade add="true"/>
         <property-type>PASSWORD</property-type>
         <description>Password to open the trust store file.</description>
     </property>
@@ -38,20 +41,24 @@
         <name>ssl.client.truststore.reload.interval</name>
         <value>10000</value>
         <description>Truststore reload interval, in milliseconds.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.client.keystore.type</name>
         <value>jks</value>
         <description>Optional. Default value is "jks".</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.client.keystore.location</name>
         <value>/etc/security/clientKeys/keystore.jks</value>
         <description>Location of the keystore file.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.client.keystore.password</name>
         <value>bigdata</value>
+        <on-ambari-upgrade add="true"/>
         <property-type>PASSWORD</property-type>
         <description>Password to open the keystore file.</description>
     </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
index f95793e..8e41f19 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
@@ -22,15 +22,18 @@
         <name>ssl.server.truststore.location</name>
         <value>/etc/security/serverKeys/all.jks</value>
         <description>Location of the trust store file.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.server.truststore.type</name>
         <value>jks</value>
         <description>Optional. Default value is "jks".</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.server.truststore.password</name>
         <value>bigdata</value>
+        <on-ambari-upgrade add="true"/>
         <property-type>PASSWORD</property-type>
         <description>Password to open the trust store file.</description>
     </property>
@@ -38,26 +41,31 @@
         <name>ssl.server.truststore.reload.interval</name>
         <value>10000</value>
         <description>Truststore reload interval, in milliseconds.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.server.keystore.type</name>
         <value>jks</value>
         <description>Optional. Default value is "jks".</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.server.keystore.location</name>
         <value>/etc/security/serverKeys/keystore.jks</value>
         <description>Location of the keystore file.</description>
+      <on-ambari-upgrade add="true"/>
     </property>
     <property>
         <name>ssl.server.keystore.password</name>
         <value>bigdata</value>
+        <on-ambari-upgrade add="true"/>
         <property-type>PASSWORD</property-type>
         <description>Password to open the keystore file.</description>
     </property>
     <property>
         <name>ssl.server.keystore.keypassword</name>
         <value>bigdata</value>
+        <on-ambari-upgrade add="true"/>
         <property-type>PASSWORD</property-type>
         <description>Password for private key in keystore file.</description>
     </property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
index df3f949..5088aee 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
@@ -52,6 +52,7 @@
       USER={{hcat_user}}
       METASTORE_PORT={{hive_metastore_port}}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
index 1f8e64b..666be68 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
@@ -25,24 +25,28 @@
     <name>hive.heapsize</name>
     <value>1024</value>
     <description>Hive Java heap size</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.client.heapsize</name>
     <value>512</value>
     <description>Hive Client Java heap size</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.heapsize</name>
     <value>1024</value>
     <description>Hive Metastore Java heap size</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive_database_type</name>
     <value>mysql</value>
     <description>Default HIVE DB type.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive_database</name>
@@ -51,6 +55,7 @@
     <description>
       Property that determines whether the HIVE DB is managed by Ambari.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
@@ -60,22 +65,26 @@
     <display-name>Database Type</display-name>
     <value>MySQL</value>
     <description>Database type.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive_database_name</name>
     <value>hive</value>
     <description>Database name.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive_dbroot</name>
     <value>/usr/lib/hive/lib/</value>
     <description>Hive DB Directory.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive_log_dir</name>
     <display-name>Hive Log Dir</display-name>
     <value>/var/log/hive</value>
     <description>Directory for Hive Log files.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -87,6 +96,7 @@
     <display-name>Hive PID Dir</display-name>
     <value>/var/run/hive</value>
     <description>Hive PID Dir.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -96,6 +106,7 @@
   <property>
     <name>hive_user</name>
     <value>hive</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>Hive User.</description>
   </property>
@@ -107,6 +118,7 @@
     <display-name>WebHCat Log Dir</display-name>
     <value>/var/log/webhcat</value>
     <description>WebHCat Log Dir.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -118,6 +130,7 @@
     <display-name>WebHCat Pid Dir</display-name>
     <value>/var/run/webhcat</value>
     <description>WebHCat Pid Dir.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>directory</type>
       <overridable>false</overridable>
@@ -127,12 +140,14 @@
   <property>
     <name>hcat_user</name>
     <value>hcat</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>HCat User.</description>
   </property>
   <property>
     <name>webhcat_user</name>
     <value>hcat</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>USER</property-type>
     <description>WebHCat User.</description>
   </property>
@@ -190,6 +205,7 @@ ${HIVE_AUX_JARS_PATH}
 
 export METASTORE_PORT={{hive_metastore_port}}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
index 3e17d2d..3a70d7c 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
@@ -113,6 +113,7 @@ log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,RFA
 log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
 
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
index f7f789b..8fa7a70 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
@@ -131,6 +131,7 @@ log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,RFA
 log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
 
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
index 5f2bc18..39c002e 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
@@ -23,6 +23,7 @@ limitations under the License.
     <name>hive.cbo.enable</name>
     <value>true</value>
     <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -34,6 +35,7 @@ limitations under the License.
       org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
       2. When HiveServer2 supports service discovery via Zookeeper.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>multiLine</type>
       <empty-value-valid>true</empty-value-valid>
@@ -44,12 +46,14 @@ limitations under the License.
     <name>hive.metastore.connect.retries</name>
     <value>24</value>
     <description>Number of retries while opening a connection to metastore</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.failure.retries</name>
     <value>24</value>
     <description>Number of retries upon failure of Thrift metastore calls</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -59,6 +63,7 @@ limitations under the License.
       Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
       Number of seconds for the client to wait between consecutive connection attempts
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -68,6 +73,7 @@ limitations under the License.
       Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
       MetaStore Client socket timeout in seconds
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -77,6 +83,7 @@ limitations under the License.
      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
       will use 10 reducers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -86,6 +93,7 @@ limitations under the License.
       The Hive client authorization manager class name. The user defined authorization class should implement
       interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -93,12 +101,14 @@ limitations under the License.
     <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
     <description>The delegation token store implementation.
       Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
     <value>localhost:2181</value>
     <description>The ZooKeeper token store connect string.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -109,6 +119,7 @@ limitations under the License.
       when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum
       in their connection string.
     </description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -118,24 +129,28 @@ limitations under the License.
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
     <description>Disable HDFS filesystem cache.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
     <description>Disable local filesystem cache.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.scratchdir</name>
     <value>/tmp/hive</value>
     <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.submitviachild</name>
     <value>false</value>
     <description/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -146,6 +161,7 @@ limitations under the License.
       separate JVM (true recommended) or not.
       Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -155,6 +171,7 @@ limitations under the License.
       This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed.
       The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -164,12 +181,14 @@ limitations under the License.
       This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
       The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.reducers.bytes.per.reducer</name>
     <value>67108864</value>
    <description>Size per reducer. The default is 256Mb, i.e. if the input size is 1G, it will use 4 reducers.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -179,6 +198,7 @@ limitations under the License.
       max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is
      negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -189,6 +209,7 @@ limitations under the License.
       A pre-execution hook is specified as the name of a Java class which implements the
       org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -199,6 +220,7 @@ limitations under the License.
       A post-execution hook is specified as the name of a Java class which implements the
       org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -209,30 +231,35 @@ limitations under the License.
       An on-failure hook is specified as the name of Java class which implements the
       org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.parallel</name>
     <value>false</value>
     <description>Whether to execute jobs in parallel</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.parallel.thread.number</name>
     <value>8</value>
     <description>How many jobs at most can be executed in parallel</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>Whether speculative execution for reducers should be turned on. </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.dynamic.partition</name>
     <value>true</value>
     <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -243,42 +270,49 @@ limitations under the License.
       in case the user accidentally overwrites all partitions.
       NonStrict allows all partitions of a table to be dynamic.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.max.dynamic.partitions</name>
     <value>5000</value>
     <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.max.dynamic.partitions.pernode</name>
     <value>2000</value>
     <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.max.created.files</name>
     <value>100000</value>
     <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.uris</name>
     <value>thrift://localhost:9083</value>
     <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property require-input = "true">
     <name>javax.jdo.option.ConnectionPassword</name>
     <display-name>Database Password</display-name>
     <value></value>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>password to use against metastore database</description>
     <value-attributes>
@@ -294,18 +328,21 @@ limitations under the License.
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
     <name>hive.metastore.server.max.threads</name>
     <value>100000</value>
     <description>Maximum number of worker threads in the Thrift server's pool.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value>/etc/security/keytabs/hive.service.keytab</value>
     <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -315,18 +352,21 @@ limitations under the License.
       The service principal for the metastore Thrift server.
       The special string _HOST will be replaced automatically with the correct host name.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
     <value>/hive/cluster/delegation</value>
     <description>The root path for token store data.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -336,6 +376,7 @@ limitations under the License.
       whenever databases, tables, and partitions are created, altered, or dropped.
       Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
       if metastore-side authorization is desired.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -347,6 +388,7 @@ limitations under the License.
       question doesn't have permissions to delete the corresponding directory
       on the storage).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -354,6 +396,7 @@ limitations under the License.
     <display-name>JDBC Driver Class</display-name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <overridable>false</overridable>
     </value-attributes>
@@ -364,6 +407,7 @@ limitations under the License.
     <display-name>Database Username</display-name>
     <value>hive</value>
     <description>Username to use against metastore database</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>db_user</type>
       <overridable>false</overridable>
@@ -374,18 +418,21 @@ limitations under the License.
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.smbjoin.cache.rows</name>
     <value>10000</value>
     <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.map.aggr.hash.percentmemory</name>
     <value>0.5</value>
     <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -395,6 +442,7 @@ limitations under the License.
       The max memory to be used by map-side group aggregation hash table.
       If the memory usage is higher than this number, force to flush data
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -404,24 +452,28 @@ limitations under the License.
       Hash aggregation will be turned off if the ratio between hash  table size and input rows is bigger than this number.
       Set to 1 to make sure hash aggregation is never turned off.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.merge.mapfiles</name>
     <value>true</value>
     <description>Merge small files at the end of a map-only job</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.merge.mapredfiles</name>
     <value>false</value>
     <description>Merge small files at the end of a map-reduce job</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.merge.size.per.task</name>
     <value>256000000</value>
     <description>Size of merged files at the end of the job</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -432,12 +484,14 @@ limitations under the License.
       map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
       if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.merge.rcfile.block.level</name>
     <value>true</value>
     <description/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -449,18 +503,21 @@ limitations under the License.
       for small ORC files. Note that enabling this config will not honor padding tolerance
       config (hive.exec.orc.block.padding.tolerance).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.orc.default.stripe.size</name>
     <value>67108864</value>
     <description>Define the default ORC stripe size</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.exec.orc.default.compress</name>
     <value>ZLIB</value>
     <description>Define the default compression codec for ORC file</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -471,6 +528,7 @@ limitations under the License.
       Define the compression strategy to use while writing data.
       This changes the compression level of higher-level compression codecs (like ZLIB).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -480,18 +538,21 @@ limitations under the License.
       If turned on, splits generated by ORC will include metadata about the stripes in the file. This
       data is read remotely (from the client or HS2 machine) and sent to all the tasks.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.orc.compute.splits.num.threads</name>
     <value>10</value>
     <description>How many threads ORC should use to create splits in parallel.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enables the optimization that converts a common join into a mapjoin based on the input file size</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.auto.convert.join.noconditionaltask</name>
@@ -501,23 +562,27 @@ limitations under the License.
       If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
       specified size, the join is directly converted to a mapjoin (there is no conditional task).
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.limit.optimize.enable</name>
     <value>true</value>
     <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.enforce.bucketing</name>
     <value>true</value>
     <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.enforce.sorting</name>
     <value>true</value>
     <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 
@@ -525,12 +590,14 @@ limitations under the License.
     <name>hive.enforce.sortmergebucketmapjoin</name>
     <value>true</value>
     <description>If the user asked for a sort-merge bucketed map-side join and it cannot be performed, should the query fail or not?</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.auto.convert.sortmerge.join</name>
     <value>true</value>
     <description>Will the join be automatically converted to a sort-merge join if the joined tables pass the criteria for a sort-merge join.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -546,22 +613,26 @@ limitations under the License.
       with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
       if the complete small table can fit in memory, and a map-join can be performed.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.optimize.constant.propagation</name>
     <value>true</value>
     <description>Whether to enable the constant propagation optimizer</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.metadataonly</name>
     <value>true</value>
     <description/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.null.scan</name>
     <value>true</value>
     <description>Don't scan relations that are guaranteed not to generate any rows</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 
@@ -571,6 +642,7 @@ limitations under the License.
     <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
       is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
       this parameter to true.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -580,6 +652,7 @@ limitations under the License.
       Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
       This should always be set to true. Since it is a new feature, it has been made configurable.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
@@ -589,6 +662,7 @@ limitations under the License.
       That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
       The optimization will be automatically disabled if the number of reducers would be less than the specified value.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.optimize.sort.dynamic.partition</name>
@@ -598,11 +672,13 @@ limitations under the License.
       This way we can keep only one record writer open for each partition value
       in the reducer, thereby reducing the memory pressure on reducers.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.stats.autogather</name>
     <value>true</value>
     <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.stats.dbclass</name>
@@ -611,6 +687,7 @@ limitations under the License.
       Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
       The storage that stores temporary Hive statistics. Currently, the jdbc, hbase, counter, and custom types are supported.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -624,6 +701,7 @@ limitations under the License.
       from the metastore. When this flag is disabled, Hive will make calls to the filesystem to get file sizes
       and will estimate the number of rows from the row schema.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.stats.fetch.column.stats</name>
@@ -634,24 +712,28 @@ limitations under the License.
       can be expensive when the number of columns is high. This flag can be used to disable fetching
       of column statistics from the metastore.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.zookeeper.client.port</name>
     <value>2181</value>
     <description>The port of ZooKeeper servers to talk to. If the list of ZooKeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.zookeeper.namespace</name>
     <value>hive_zookeeper_namespace</value>
     <description>The parent node under which all ZooKeeper nodes are created.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.txn.manager</name>
     <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
     <description/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -663,6 +745,7 @@ limitations under the License.
       streaming data into Hive.  But it will also increase the number of
       open transactions at any given time, possibly impacting read performance.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -671,6 +754,7 @@ limitations under the License.
     <description>
       Support concurrency and use locks, needed for transactions. Requires ZooKeeper.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -679,6 +763,7 @@ limitations under the License.
     <description>
       Whether to print the names of the columns in query output.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -689,6 +774,7 @@ limitations under the License.
       Time before a given compaction in working state is declared a failure
       and returned to the initiated state.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.compactor.check.interval</name>
@@ -698,12 +784,14 @@ limitations under the License.
       Time between checks to see if any partitions need to be compacted.
       This should be kept high because each check for compaction requires many calls against the NameNode.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.compactor.delta.pct.threshold</name>
     <value>0.1f</value>
     <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.fetch.task.conversion</name>
@@ -717,6 +805,7 @@ limitations under the License.
       1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
       2. more    : SELECT, FILTER, LIMIT only (supports TABLESAMPLE and virtual columns)
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.fetch.task.conversion.threshold</name>
@@ -726,6 +815,7 @@ limitations under the License.
       is calculated by summation of file lengths. If it's not native, the storage handler for the table
       can optionally implement the org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -736,12 +826,14 @@ limitations under the License.
       final aggregations in a single reduce task. If this is set to true, Hive delegates the final aggregation
       stage to a fetch task, possibly decreasing the query time.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>false</value>
     <description>Enable or disable Hive client authorization</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
@@ -754,6 +846,7 @@ limitations under the License.
       Hive client authenticator manager class name. The user-defined authenticator should implement
       the interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -764,12 +857,14 @@ limitations under the License.
       The user-defined authorization class should implement the interface
       org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.security.metastore.authorization.auth.reads</name>
     <value>true</value>
     <description>If this is true, the metastore authorizer authorizes read actions on databases and tables</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -779,24 +874,28 @@ limitations under the License.
       authenticator manager class name to be used in the metastore for authentication.
       The user-defined authenticator should implement the interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.logging.operation.enabled</name>
     <value>true</value>
     <description>When true, HS2 will save operation logs</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.logging.operation.log.location</name>
     <value>${java.io.tmpdir}/${user.name}/operation_logs</value>
     <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.zookeeper.namespace</name>
     <value>hiveserver2</value>
     <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -806,18 +905,21 @@ limitations under the License.
       Expects one of [binary, http].
       Transport mode of HiveServer2.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.thrift.http.port</name>
     <value>10001</value>
     <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.thrift.http.path</name>
     <value>cliservice</value>
     <description>Path component of URL endpoint when in HTTP mode.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -825,6 +927,7 @@ limitations under the License.
     <display-name>HiveServer2 Port</display-name>
     <value>10000</value>
     <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'.</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <overridable>false</overridable>
       <type>int</type>
@@ -843,18 +946,21 @@ limitations under the License.
       "auth-conf" - authentication plus integrity and confidentiality protection
       This is applicable only if HiveServer2 is configured to use Kerberos authentication.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.thrift.max.worker.threads</name>
     <value>500</value>
     <description>Maximum number of Thrift worker threads</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.allow.user.substitution</name>
     <value>true</value>
     <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -872,12 +978,14 @@ limitations under the License.
       hive.server2.authentication.spnego.keytab
       are specified.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.authentication</name>
     <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
     <value>NONE</value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -890,6 +998,7 @@ limitations under the License.
       and HTTP transport mode is used.
       This needs to be set only if SPNEGO is to be used in authentication.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -900,6 +1009,7 @@ limitations under the License.
       submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
       process runs as.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.server2.table.type.mapping</name>
@@ -911,21 +1021,25 @@ limitations under the License.
       HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
       CLASSIC : More generic types like TABLE and VIEW
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.server2.use.SSL</name>
     <value>false</value>
     <description/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.server2.keystore.path</name>
     <value>/etc/security/keystores/hs2keystore.jks</value>
     <description>SSL certificate keystore location</description>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hive.server2.keystore.password</name>
     <value>password</value>
+    <on-ambari-upgrade add="true"/>
     <property-type>PASSWORD</property-type>
     <description>SSL certificate keystore password</description>
   </property>
@@ -934,6 +1048,7 @@ limitations under the License.
     <name>hive.conf.restricted.list</name>
     <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
     <description>Comma-separated list of configuration options that are immutable at runtime</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -943,6 +1058,7 @@ limitations under the License.
       Max number of entries in the vector group-by aggregation hashtables.
       Exceeding this will trigger a flush irrespective of memory pressure.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <!-- missing from HiveConf -->
@@ -951,9 +1067,9 @@ limitations under the License.
     <display-name>Database Name</display-name>
     <value>hive</value>
     <description>Database name used as the Hive Metastore</description>
+    <on-ambari-upgrade add="true"/>
     <value-attributes>
       <type>database</type>
-      <type>host</type>
       <overridable>false</overridable>
     </value-attributes>
   </property>
@@ -963,12 +1079,14 @@ limitations under the License.
     <value>false</value>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -977,6 +1095,7 @@ limitations under the License.
     <description>If the tables being joined are sorted and bucketized on the join columns, and they have the same number
     of buckets, a sort-merge join can be performed by setting this parameter to true.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -986,6 +1105,7 @@ limitations under the License.
       is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
       converted to a mapjoin (there is no conditional task). The default is 10MB.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -995,6 +1115,7 @@ limitations under the License.
       This flag should be set to true to enable vectorized mode of query execution.
       The default value is false.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1004,6 +1125,7 @@ limitations under the License.
       This flag should be set to true to enable vectorized mode of the reduce-side of query execution.
       The default value is true.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1012,6 +1134,7 @@ limitations under the License.
     <description>
       Whether to enable automatic use of indexes
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1020,6 +1143,7 @@ limitations under the License.
     <description>
       Chooses the execution engine. The option is: mr (MapReduce, default)
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1027,12 +1151,14 @@ limitations under the License.
     <value>1024</value>
 	<!--value>4096</value-->
     <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.vectorized.groupby.flush.percent</name>
     <value>0.1</value>
     <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1043,48 +1169,56 @@ limitations under the License.
       stored in the metastore. For basic stats collection, set the config hive.stats.autogather to true.
       For more advanced stats collection, run ANALYZE TABLE queries.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.limit.pushdown.memory.usage</name>
     <value>0.04</value>
     <description>The max memory to be used for the hash in the RS operator for top-K selection.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.txn.timeout</name>
     <value>300</value>
     <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.compactor.initiator.on</name>
     <value>false</value>
     <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the Thrift metastore, this should only be set to true for one of them.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.compactor.worker.threads</name>
     <value>0</value>
     <description>Number of compactor worker threads to run on this metastore instance. This can be set to different values on different metastore instances.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.compactor.delta.num.threshold</name>
     <value>10</value>
     <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.compactor.abortedtxn.threshold</name>
     <value>1000</value>
     <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>datanucleus.cache.level2.type</name>
     <value>none</value>
     <description>Determines the caching mechanism the DataNucleus L2 cache will use. It is strongly recommended to use the default value of 'none', as other values may cause consistency errors in Hive.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1093,6 +1227,7 @@ limitations under the License.
     <description>The maximum number of bytes that a query using the compact index can read.
       A negative value is equivalent to infinity.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -1100,12 +1235,14 @@ limitations under the License.
     <value>true</value>
     <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from the dfs umask.
     </description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>hive.start.cleanup.scratchdir</name>
     <value>true</value>
     <description>To clean up the Hive scratchdir when starting the Hive server.</description>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
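
For reviewers, a minimal sketch of the property shape this change standardizes on, with the on-ambari-upgrade element placed directly after the description. The names and values below are reused from the HiveServer2 port hunk above, so this is just the consolidated post-patch view, not new configuration:

  <property>
    <name>hive.server2.thrift.port</name>
    <display-name>HiveServer2 Port</display-name>
    <value>10000</value>
    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'.</description>
    <on-ambari-upgrade add="true"/>
    <value-attributes>
      <overridable>false</overridable>
      <type>int</type>
    </value-attributes>
  </property>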

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
index 2eec231..73d0d27 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
@@ -49,6 +49,7 @@ CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
 # Set HADOOP_HOME to point to a specific hadoop install directory
 export HADOOP_HOME={{hadoop_home}}
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
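
The same element is also added to template-style properties whose value is a script body, as in the webhcat-env hunk above. A sketch of the post-patch shape; note that the property name and the head of the value do not appear in the hunk, so the name line below is an assumption and the body is elided except for the visible tail:

  <property>
    <name>content</name>
    <value>
...
# Set HADOOP_HOME to point to a specific hadoop install directory
export HADOOP_HOME={{hadoop_home}}
    </value>
    <on-ambari-upgrade add="true"/>
  </property>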

http://git-wip-us.apache.org/repos/asf/ambari/blob/1d6fcfa1/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
index 0ded4d4..237aa73 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
@@ -73,6 +73,7 @@ log4j.logger.org.apache.zookeeper = WARN
 log4j.logger.org.eclipse.jetty = INFO
 
     </value>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>