Posted to commits@ambari.apache.org by al...@apache.org on 2016/12/08 23:12:54 UTC

[07/20] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
new file mode 100644
index 0000000..ad6cf4f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
+    <description>Java keystore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
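
For reference, values such as {{stack_root}} and {{credential_file}} in the keystore/truststore properties above are template placeholders that Ambari fills in at deploy time from the stack and cluster configuration. A minimal sketch of that substitution, assuming Jinja-style rendering and purely illustrative paths (this is not Ambari's actual resolution code):

    # Sketch only: shows how the {{...}} placeholders in the SSL config resolve.
    from jinja2 import Template

    params = {
        "stack_root": "/usr/hdp",                          # assumed stack root
        "credential_file": "/etc/ranger/yarn/cred.jceks",  # assumed credential store path
    }

    raw_values = {
        "xasecure.policymgr.clientssl.keystore":
            "{{stack_root}}/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks",
        "xasecure.policymgr.clientssl.keystore.credential.file":
            "jceks://file{{credential_file}}",
    }

    for name, raw in raw_values.items():
        print(name, "=", Template(raw).render(**params))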

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
new file mode 100644
index 0000000..5f69962
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/ranger-yarn-security.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.yarn.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Yarn instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for changes in policies, in milliseconds.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
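
For reference, the plugin retrieves policies from {{policymgr_mgr_url}} every 30 seconds (ranger.plugin.yarn.policy.pollIntervalMs) and keeps the last good copy under ranger.plugin.yarn.policy.cache.dir so enforcement can continue when Ranger Admin is unreachable. A rough, purely illustrative sketch of that poll-and-cache loop (the URL, service name, cache path, and REST endpoint are assumptions here; the real client is org.apache.ranger.admin.client.RangerAdminRESTClient):

    # Illustrative poll-and-cache loop; not the actual Ranger plugin code.
    import json, os, time, urllib.request

    POLICY_REST_URL = "http://ranger-admin.example.com:6080"  # assumed {{policymgr_mgr_url}}
    SERVICE_NAME = "cl1_yarn"                                 # assumed {{repo_name}}
    CACHE_FILE = "/tmp/%s_policies.json" % SERVICE_NAME       # stands in for the policycache dir
    POLL_INTERVAL_MS = 30000                                  # ranger.plugin.yarn.policy.pollIntervalMs

    def poll_once():
        url = "%s/service/plugins/policies/download/%s" % (POLICY_REST_URL, SERVICE_NAME)
        try:
            with urllib.request.urlopen(url, timeout=10) as resp:
                policies = json.load(resp)
            with open(CACHE_FILE, "w") as f:                  # refresh the cache on success
                json.dump(policies, f)
            return policies
        except Exception:
            if os.path.exists(CACHE_FILE):                    # fall back to the cached copy
                with open(CACHE_FILE) as f:
                    return json.load(f)
            return None

    if __name__ == "__main__":
        while True:
            poll_once()
            time.sleep(POLL_INTERVAL_MS / 1000.0)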

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
new file mode 100644
index 0000000..d8531b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-env.xml
@@ -0,0 +1,306 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <display-name>YARN Log Dir Prefix</display-name>
+    <description>YARN Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <display-name>YARN PID Dir Prefix</display-name>
+    <description>YARN PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <display-name>Yarn User</display-name>
+    <value>yarn</value>
+    <property-type>USER</property-type>
+    <description>YARN User</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <display-name>YARN Java heap size</display-name>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>ResourceManager Java heap size</display-name>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <display-name>NodeManager Java heap size</display-name>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>min_user_id</name>
+    <value>1000</value>
+    <display-name>Minimum user ID for submitting job</display-name>
+    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>is_supported_yarn_ranger</name>
+    <value>true</value>
+    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nofile_limit</name>
+    <value>32768</value>
+    <description>Max open files limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for YARN user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.1 -->
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn_cgroups_enabled</name>
+    <value>false</value>
+    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+    <display-name>CPU Isolation</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>yarn-env template</display-name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+      export JAVA_HOME={{java64_home}}
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+      # We need to add the EWMA appender for the yarn daemons only;
+      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
+      # daemons. This restricts the EWMA appender to daemons only.
+      INVOKER="${0##*/}"
+      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+      export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+      fi
+
+      # User for YARN daemons
+      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+      # resolve links - $0 may be a softlink
+      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+      # some Java parameters
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      if [ "$JAVA_HOME" != "" ]; then
+      #echo "run java in $JAVA_HOME"
+      JAVA_HOME=$JAVA_HOME
+      fi
+
+      if [ "$JAVA_HOME" = "" ]; then
+      echo "Error: JAVA_HOME is not set."
+      exit 1
+      fi
+
+      JAVA=$JAVA_HOME/bin/java
+      JAVA_HEAP_MAX=-Xmx1000m
+
+      # For setting YARN specific HEAP sizes please use this
+      # Parameter and set appropriately
+      YARN_HEAPSIZE={{yarn_heapsize}}
+
+      # check envvars which might override default args
+      if [ "$YARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+      fi
+
+      # Resource Manager specific parameters
+
+      # Specify the max Heapsize for the ResourceManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_RESOURCEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+      # Specify the JVM options to be used when starting the ResourceManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_RESOURCEMANAGER_OPTS=
+
+      # Node Manager specific parameters
+
+      # Specify the max Heapsize for the NodeManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_NODEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+      # Specify the max Heapsize for the timeline server using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1024.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_TIMELINESERVER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+      # Specify the JVM options to be used when starting the NodeManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_NODEMANAGER_OPTS=
+
+      # so that filenames w/ spaces are handled correctly in loops below
+      IFS=
+
+
+      # default log directory and file
+      if [ "$YARN_LOG_DIR" = "" ]; then
+      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+      fi
+      if [ "$YARN_LOGFILE" = "" ]; then
+      YARN_LOGFILE='yarn.log'
+      fi
+
+      # default policy file for service-level authorization
+      if [ "$YARN_POLICYFILE" = "" ]; then
+      YARN_POLICYFILE="hadoop-policy.xml"
+      fi
+
+      # restore ordinary behaviour
+      unset IFS
+
+
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+      fi
+      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>service_check.queue.name</name>
+    <value>default</value>
+    <description>
+      The queue used by the service check.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
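
For reference, in the rendered yarn-env.sh above the generic yarn_heapsize becomes JAVA_HEAP_MAX, while the per-daemon values (resourcemanager_heapsize, nodemanager_heapsize, apptimelineserver_heapsize) are exported as YARN_*_HEAPSIZE and take precedence for their daemons. A tiny illustrative sketch of that precedence with the defaults above (the helper is made up for illustration, not part of the stack scripts):

    # Sketch only: mirrors the -Xmx selection described in the yarn-env.sh comments.
    def effective_xmx_mb(yarn_heapsize, daemon_heapsize=None):
        """A per-daemon HEAPSIZE (e.g. YARN_RESOURCEMANAGER_HEAPSIZE) wins over YARN_HEAPSIZE."""
        return daemon_heapsize if daemon_heapsize else yarn_heapsize

    print(effective_xmx_mb(1024))        # defaults above: all daemons -> -Xmx1024m
    print(effective_xmx_mb(1024, 2048))  # e.g. ResourceManager tuned separately -> -Xmx2048m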

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
new file mode 100644
index 0000000..1d828ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-log4j.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>yarn-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+      #Relative to Yarn Log Dir Prefix
+      yarn.log.dir=.
+      #
+      # Job Summary Appender
+      #
+      # Use following logger to send summary to separate file defined by
+      # hadoop.mapreduce.jobsummary.log.file rolled daily:
+      # hadoop.mapreduce.jobsummary.logger=INFO,JSA
+      #
+      hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+      hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+      log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+      # Set the ResourceManager summary log filename
+      yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+      # Set the ResourceManager summary log level and appender
+      yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+      #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+      # To enable AppSummaryLogging for the RM,
+      # set yarn.server.resourcemanager.appsummary.logger to
+      # LEVEL,RMSUMMARY in hadoop-env.sh
+
+      # Appender for ResourceManager Application Summary Log
+      # Requires the following properties to be set
+      #    - hadoop.log.dir (Hadoop Log directory)
+      #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+      #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+      log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+      log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+      log4j.appender.RMSUMMARY.MaxFileSize=256MB
+      log4j.appender.RMSUMMARY.MaxBackupIndex=20
+      log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+      log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+      log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+      # Appender for viewing information for errors and warnings
+      yarn.ewma.cleanupInterval=300
+      yarn.ewma.messageAgeLimitSeconds=86400
+      yarn.ewma.maxUniqueMessages=250
+      log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+      log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+      log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+      log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+      # Audit logging for ResourceManager
+      rm.audit.logger=${hadoop.root.logger}
+      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+      log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+      log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+      # Audit logging for NodeManager
+      nm.audit.logger=${hadoop.root.logger}
+      log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+      log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+      log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+      log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+      log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+      log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
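
For reference, the RMAUDIT/NMAUDIT appenders above are daily-rolling files activated through the -Drm.audit.logger=INFO,RMAUDIT and -Dnm.audit.logger=INFO,NMAUDIT options exported in yarn-env.sh, with additivity disabled so audit records do not also reach the root logger. A Python logging analogy of the RMAUDIT setup, for illustration only (this is not the log4j configuration itself):

    # Analogy only: roughly what the RMAUDIT appender produces.
    import logging
    from logging.handlers import TimedRotatingFileHandler

    handler = TimedRotatingFileHandler("/tmp/rm-audit.log", when="midnight")  # ~ DatePattern=.yyyy-MM-dd
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s: %(message)s"))                   # ~ %d{ISO8601} %p %c{2}: %m%n
    audit = logging.getLogger("resourcemanager.RMAuditLogger")
    audit.setLevel(logging.INFO)
    audit.propagate = False                                                    # ~ log4j.additivity...=false
    audit.addHandler(handler)
    audit.info("USER=ambari-qa  OPERATION=Submit Application Request  RESULT=SUCCESS")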

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
new file mode 100644
index 0000000..95cf0c9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-logsearch-conf.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>service_name</name>
+    <display-name>Service name</display-name>
+    <description>Service name for Logsearch Portal (label)</description>
+    <value>YARN</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>component_mappings</name>
+    <display-name>Component mapping</display-name>
+    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
+    <value>RESOURCEMANAGER:yarn_resourcemanager,yarn_historyserver,yarn_jobsummary;NODEMANAGER:yarn_nodemanager;APP_TIMELINE_SERVER:yarn_timelineserver</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>Logfeeder Config</display-name>
+    <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service-specific logs.</description>
+    <value>
+{
+  "input":[
+    {
+      "type":"yarn_nodemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log"
+    },
+    {
+      "type":"yarn_resourcemanager",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log"
+    },
+    {
+      "type":"yarn_timelineserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log"
+    },
+    {
+      "type":"yarn_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/yarn-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-historyserver-*.log"
+    },
+    {
+      "type":"yarn_jobsummary",
+      "rowtype":"service",
+      "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log"
+    }
+   ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "yarn_historyserver",
+            "yarn_jobsummary",
+            "yarn_nodemanager",
+            "yarn_resourcemanager",
+            "yarn_timelineserver"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+         }
+       }
+     }
+   ]
+}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
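
For reference, the Logfeeder filter above matches the standard YARN log4j layout (%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n) and maps the leading timestamp using the Java date pattern yyyy-MM-dd HH:mm:ss,SSS. A rough Python equivalent of that parse for one sample line (the regex is a simplified stand-in for the grok message_pattern, and the sample line is invented):

    # Simplified stand-in for the grok message_pattern above.
    import re
    from datetime import datetime

    sample = ("2016-12-08 23:12:54,123 INFO  resourcemanager.ResourceManager "
              "(ResourceManager.java:main(1234)) - Starting ResourceManager")

    pattern = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
        r"(?P<level>[A-Z]+)\s+(?P<logger_name>\S+)\s+"
        r"\((?P<file>[^:]+):(?P<method>[^(]+)\((?P<line_number>\d+)\)\)\s+-\s+(?P<log_message>.*)$")

    fields = pattern.match(sample).groupdict()
    # Java's yyyy-MM-dd HH:mm:ss,SSS corresponds to this strptime format:
    fields["logtime"] = datetime.strptime(fields["logtime"], "%Y-%m-%d %H:%M:%S,%f")
    print(fields)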

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
new file mode 100644
index 0000000..01c3b47
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/configuration/yarn-site.xml
@@ -0,0 +1,1151 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- ResourceManager -->
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description>The address of the ResourceManager.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+    <display-name>Minimum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>5120</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+    <display-name>Maximum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Are ACLs enabled.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- NodeManager -->
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+    <display-name>Memory allocated for all YARN containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,{{stack_root}}/current/hadoop-client/*,{{stack_root}}/current/hadoop-client/lib/*,{{stack_root}}/current/hadoop-hdfs-client/*,{{stack_root}}/current/hadoop-hdfs-client/lib/*,{{stack_root}}/current/hadoop-yarn-client/*,{{stack_root}}/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+    <display-name>Virtual Memory Ratio</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.1</minimum>
+      <maximum>5.0</maximum>
+      <increment-step>0.1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>cluster-env</type>
+        <name>user_group</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_${contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script timeout period.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log.retain-seconds</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+    <display-name>Enable Log Aggregation</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: if you set this too small, you will spam the name node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; i.e.
+      if there are fewer healthy local-dirs (or log-dirs) available,
+      then new containers will not be launched on this node.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.https.address</name>
+    <value>localhost:8090</value>
+    <description>
+      The https address of the RM web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on http) and HTTPS_ONLY (service is provided only on https).
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicates to clients whether the timeline service is enabled.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is the default address for the timeline server to start
+      the RPC server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+    <description>
+      Is the registry enabled? If so, the RM starts it up, creates the user and system paths, and purges service records when containers, application attempts, and applications complete.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of hostname:port pairs defining the zookeeper quorum binding for the registry
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>Enable the node manager to recover after starting</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.dir</name>
+    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
+    <description>
+      The local filesystem directory in which the node manager will store
+      state when recovery is enabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+    <value>10000</value>
+    <description>Time interval between each attempt to connect to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
+    <value>60000</value>
+    <description>Max time to wait to establish a connection to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM work-preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+    <display-name>Enable Work Preserving Restart</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+    <description>
+      The class to use as the persistent store.
+      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
+      the store is implicitly fenced, meaning only a single ResourceManager
+      is able to use the store at any point in time.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
+    <value>/rmstore</value>
+    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-acl</name>
+    <value>world:anyone:rwcda</value>
+    <description>ACL's to be used for ZooKeeper znodes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
+    <value>10000</value>
+    <description>Set the amount of time the RM waits before allocating new containers on work-preserving recovery. This wait period gives the RM a chance to settle down, resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
+    <value>1000</value>
+    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+      When HA is enabled, the value here is NOT used. It is generated
+      automatically from yarn.resourcemanager.zk-timeout-ms and
+      yarn.resourcemanager.zk-num-retries."
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-num-retries</name>
+    <value>1000</value>
+    <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-timeout-ms</name>
+    <value>10000</value>
+    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
+    <value>${yarn.resourcemanager.max-completed-applications}</value>
+    <description>The maximum number of completed applications the RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries, i.e. (t0, n0), (t1, n1), ...: the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
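+  <!-- For example, the default "2000, 500" is a single (sleep-time, number-of-retries) pair: the
+       HDFS client retries up to 500 times, sleeping roughly 2000 ms between attempts. A value such
+       as "1000, 10, 60000, 5" would add a second stage of 5 retries spaced about a minute apart. -->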
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value> </value>
+    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>enable RM HA or not</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will auto-mount the CGroup hierarchy (the mount directory must already exist); otherwise, the CGroup should be mounted by the admin</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
+    <value>/cgroup</value>
+    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
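+  <!-- As an illustration: with mount=true, mount-path=/cgroup and hierarchy=hadoop-yarn, the LCE
+       typically places container cgroups under paths such as /cgroup/cpu/hadoop-yarn/<container-id>;
+       the /cgroup directory itself must exist before the NodeManager starts. -->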
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description>Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, the number of vcores is 8 by default.
+    </description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
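+  <!-- As a worked example: on a node with 16 physical cores and the default limit of 80%, CGroups
+       caps the combined CPU usage of all YARN containers at roughly 12.8 cores' worth of CPU time. -->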
+  <property>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
+    <value>1000</value>
+    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+    <value>90</value>
+    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
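+  <!-- For example, on a 1 TB volume the 90% utilization threshold marks the disk unhealthy once
+       roughly 900 GB are used, which triggers well before free space falls to the 1000 MB minimum;
+       when both thresholds are set, the more conservative of the two applies. -->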
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1; by default, logs are uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling interval that can be set is 3600 seconds.</description>
+    <value>3600</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
+    <value>false</value>
+    <description>
+      This configuration is for debug and test purposes.
+      By setting it to true, the lower bound of
+      yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds can be bypassed.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
+    <value>10</value>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.max-retries</name>
+    <value>30</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.retry-interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <description>
+      Enable age off of timeline store data.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.state-store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
+    <description>Store class name for timeline state store.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-state-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb state store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb timeline store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
+    <value>104857600</value>
+    <description>
+      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
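+  <!-- 104857600 bytes = 100 MiB of uncompressed-block read cache. -->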
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens (fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The two meaningful options are blank (bind only to the address configured for the service) and 0.0.0.0 (bind on all interfaces).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The two meaningful options are blank (bind only to the address configured for the service) and 0.0.0.0 (bind on all interfaces).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The two meaningful options are blank (bind only to the address configured for the service) and 0.0.0.0 (bind on all interfaces).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
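+  <!-- In practice: 0.0.0.0 makes the daemon listen on every interface of the host, which is
+       convenient on multi-homed machines; leaving the value blank makes it bind only to the
+       hostname or IP resolved from the corresponding *.address property. -->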
+  <property>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value>/system/yarn/node-labels</value>
+    <description>
+      URI for NodeLabelManager.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description/>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description/>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
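+  <!-- Once node labels are enabled (and yarn.node-labels.fs-store.root-dir is reachable), labels
+       are typically defined and assigned with the RM admin CLI, e.g.:
+         yarn rmadmin -addToClusterNodeLabels "gpu"
+         yarn rmadmin -replaceLabelsOnNode "worker01.example.com=gpu"
+       ("gpu" and the hostname are illustrative placeholders). -->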
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>core-site</type>
+        <name>hadoop.security.authentication</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- In HDP 2.3, these properties were deleted:
+yarn.node-labels.manager-class
+-->
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
+    <value>true</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.authorization-provider</name>
+    <description> Yarn authorization provider class. </description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!--ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.version</name>
+    <value>1.5</value>
+    <description>Timeline service version we&#x2019;re currently using.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
+    <description>Main storage class for YARN timeline server.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
+    <value>/ats/active/</value>
+    <description>DFS path to store active application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
+    <value>/ats/done/</value>
+    <description>DFS path to store done application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
+    <value/>
+    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- advanced ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
+    <!-- Use rolling leveldb, advanced -->
+    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage reader. This
+      value controls how frequently the reader will scan the HDFS active directory
+      for application status.
+    </description>
+    <!-- Default is 60 seconds, advanced -->
+    <value>60</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for ATS v1.5 entity group file system storage cleaner. This
+      value controls how frequently the cleaner will scan the HDFS done directory
+      for stale application data.
+    </description>
+    <!-- 3600 is default, advanced -->
+    <value>3600</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
+    <!-- 7 days is default, advanced -->
+    <value>604800</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
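+  <!-- Taken together, the ATS v1.5 file system store works roughly as follows: running
+       applications write entity data under the active-dir (/ats/active/), the reader rescans it
+       every scan-interval-seconds (60 s), and data for completed applications is moved to the
+       done-dir (/ats/done/), where the cleaner (running every 3600 s) removes it after
+       retain-seconds, i.e. 604800 s = 7 days. -->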
+
+  <!-- These configs were inherited from HDP 2.4 -->
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
+    <value>60000</value>
+    <description>The delay time in ms to unregister container metrics after completion.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
+    <value/>
+    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/647c6f27/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
new file mode 100644
index 0000000..e690204
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0/kerberos.json
@@ -0,0 +1,278 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file