Posted to commits@ambari.apache.org by sm...@apache.org on 2016/12/09 21:57:15 UTC

[23/51] [abbrv] ambari git commit: AMBARI-19137. HDP 3.0 TP - move ZK, HDFS, YARN/MR into new common-services version (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
deleted file mode 100644
index a6b1baa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
+++ /dev/null
@@ -1,177 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db</name>
-    <value>false</value>
-    <display-name>Audit to DB</display-name>
-    <description>Is Audit to DB enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.db</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.url</name>
-    <value>{{audit_jdbc_url}}</value>
-    <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.user</name>
-    <value>{{xa_audit_db_user}}</value>
-    <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.password</name>
-    <value>crypted</value>
-    <property-type>PASSWORD</property-type>
-    <description>Audit DB JDBC Password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.jdbc.driver</name>
-    <value>{{jdbc_driver}}</value>
-    <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.credential.provider.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/db/spool</value>
-    <description>/var/log/hadoop/yarn/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
-    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr Zookeeper string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
-    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
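
The {{...}} placeholders in the values above (audit_jdbc_url, xa_audit_db_user,
credential_file) are template variables that Ambari resolves from stack
parameters at deploy time. Below is a minimal sketch of that substitution using
jinja2; the parameter values are hypothetical stand-ins, and Ambari's actual
agent-side templating machinery is separate from this.

    from jinja2 import Template

    # Property values copied from ranger-yarn-audit.xml above; the params
    # dict stands in for Ambari's stack parameters (all values hypothetical).
    props = {
        "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}",
        "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}",
        "xasecure.audit.credential.provider.file": "jceks://file{{credential_file}}",
    }
    params = {
        "audit_jdbc_url": "jdbc:mysql://db.example.com:3306/ranger_audit",
        "xa_audit_db_user": "rangerlogger",
        "credential_file": "/etc/ranger/yarn/cred.jceks",
    }
    for name, raw in props.items():
        print(name, "=", Template(raw).render(**params))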

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
deleted file mode 100644
index 97867cc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for YARN</display-name>
-    <description>This user must be a system user and must also exist in the Ranger Admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value/>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for certificate; this value should match what is specified in the repo within Ranger Admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-yarn-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for YARN</display-name>
-    <description>Enable the Ranger YARN plugin?</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>yarn</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
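
The REPOSITORY_CONFIG_USERNAME/REPOSITORY_CONFIG_PASSWORD pair above is
described as "used for repository creation on ranger admin". A hedged sketch of
what that creation step looks like against Ranger Admin's public REST API;
treat the endpoint path and payload shape as assumptions to verify against your
Ranger version, and the hosts and credentials as placeholders.

    import requests

    ranger_admin = "http://ranger-admin.example.com:6080"   # placeholder host
    payload = {
        "name": "cluster1_yarn",   # hypothetical repository (service) name
        "type": "yarn",
        "configs": {
            "username": "yarn",    # REPOSITORY_CONFIG_USERNAME
            "password": "yarn",    # REPOSITORY_CONFIG_PASSWORD
            "yarn.url": "http://resourcemanager.example.com:8088",  # placeholder
        },
    }
    resp = requests.post(f"{ranger_admin}/service/public/v2/api/service",
                         json=payload, auth=("admin", "admin"), timeout=30)
    resp.raise_for_status()
    print(resp.json())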

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
deleted file mode 100644
index 5410104..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
-    <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>password for keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
-    <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
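
The two *.credential.file properties above point the plugin at a JCEKS store
that holds the keystore and truststore passwords. A hedged sketch of populating
that store with Hadoop's credential CLI; the alias names and the path are
assumptions for illustration, since the aliases the Ranger plugin expects can
differ by version.

    import subprocess

    cred_file = "/etc/ranger/yarn/cred.jceks"   # stands in for {{credential_file}}
    for alias, secret in [("sslKeyStore", "myKeyFilePassword"),
                          ("sslTrustStore", "changeit")]:
        # hadoop credential create <alias> -value <secret> -provider <url>
        subprocess.run(["hadoop", "credential", "create", alias,
                        "-value", secret,
                        "-provider", f"jceks://file{cred_file}"],
                       check=True)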

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
deleted file mode 100644
index 5f69962..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>ranger.plugin.yarn.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing policies for this Yarn instance</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.yarn.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
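
Taken together, these properties describe a poll-and-cache loop: the
RangerAdminRESTClient fetches policies from policy.rest.url every
policy.pollIntervalMs and caches them under policy.cache.dir. A rough sketch of
that behavior; the download endpoint path is an assumption based on how Ranger
plugins fetch policies, and the host and service name are placeholders.

    import time
    import requests

    policy_rest_url = "http://ranger-admin.example.com:6080"  # {{policymgr_mgr_url}} stand-in
    service_name = "cluster1_yarn"                            # {{repo_name}} stand-in
    cache_path = f"/etc/ranger/{service_name}/policycache/policies.json"
    poll_interval_ms = 30000   # ranger.plugin.yarn.policy.pollIntervalMs

    while True:
        # Endpoint path is an assumption; verify against your Ranger version.
        resp = requests.get(f"{policy_rest_url}/service/plugins/policies"
                            f"/download/{service_name}", timeout=30)
        if resp.ok:
            with open(cache_path, "w") as f:
                f.write(resp.text)   # cache the latest policies locally
        time.sleep(poll_interval_ms / 1000.0)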

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
deleted file mode 100644
index bbc2930..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
+++ /dev/null
@@ -1,200 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true" supports_adding_forbidden="true">
-  <!-- These properties were inherited from HDP 2.1 -->
-  <property>
-    <name>apptimelineserver_heapsize</name>
-    <value>1024</value>
-    <display-name>AppTimelineServer Java heap size</display-name>
-    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <unit>MB</unit>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn_cgroups_enabled</name>
-    <value>false</value>
-    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
-    <display-name>CPU Isolation</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These properties were inherited from HDP 2.3 -->
-  <property>
-    <name>is_supported_yarn_ranger</name>
-    <value>true</value>
-    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- yarn-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>yarn-env template</display-name>
-    <description>This is the jinja template for yarn-env.sh file</description>
-    <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
-
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This restricts the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
-
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
-
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
-
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
-
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
-
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
-
-      # Resource Manager specific parameters
-
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
-
-      # Node Manager specific parameters
-
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify a JVM option of -Xmx1024m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
-
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
-
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
-
-
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
-
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
-
-      # restore ordinary behaviour
-      unset IFS
-
-
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
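
The content property above is a Jinja template for yarn-env.sh. Rendering a
fragment of it shows how the exported heap-size variables come out; a minimal
sketch with hypothetical parameter values (apptimelineserver_heapsize matches
the 1024 default defined earlier in this file).

    from jinja2 import Template

    fragment = Template(
        "export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n"
        "export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n"
        "export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n"
    )
    print(fragment.render(
        resourcemanager_heapsize=1024,    # hypothetical
        nodemanager_heapsize=1024,        # hypothetical
        apptimelineserver_heapsize=1024,  # default from yarn-env.xml above
    ))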

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
deleted file mode 100644
index 9ac34f3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>yarn-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#Relative to Yarn Log Dir Prefix
-yarn.log.dir=.
-#
-# Job Summary Appender
-#
-# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-#
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# LEVEL,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-
-# Appender for viewing information for errors and warnings
-yarn.ewma.cleanupInterval=300
-yarn.ewma.messageAgeLimitSeconds=86400
-yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
-
-# Audit logging for ResourceManager
-rm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
-
-# Audit logging for NodeManager
-nm.audit.logger=${hadoop.root.logger}
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
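
One practical consequence of the RMSUMMARY appender settings above
(MaxFileSize=256MB, MaxBackupIndex=20) is a bounded disk footprint: the active
file plus 20 backups. A quick check of that bound:

    # RollingFileAppender keeps the active file plus MaxBackupIndex backups.
    max_file_mb = 256       # log4j.appender.RMSUMMARY.MaxFileSize
    max_backup_index = 20   # log4j.appender.RMSUMMARY.MaxBackupIndex
    print(f"RMSUMMARY worst-case footprint: {max_file_mb * (max_backup_index + 1)} MB")
    # -> 5376 MB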

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
index e33b91d..0f46d75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
@@ -17,798 +17,19 @@
   limitations under the License.
 -->
 <configuration supports_final="true">
-  <!-- These configs were inherited from HDP 2.1 -->
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicate to clients whether timeline service is enabled or not.
-      If enabled, clients will put entities and events to the timeline server.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for history store, defaulting to file system store
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>localhost:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>localhost:8190</value>
-    <description>
-      The https address of the timeline service web application.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>localhost:10200</value>
-    <description>
-      This is the default address for the timeline server to start
-      the RPC server.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <value>2678400000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <value>300000</value>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
 
-  <!-- These configs were inherited from HDP 2.2 -->
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
-    <description>Classpath for typical applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.registry.rm.enabled</name>
-    <value>false</value>
-    <description>
-      Is the registry enabled? If so, the RM starts it up, creates the user and system paths, and purges service records when containers, application attempts, and applications complete.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-    <description>
-      List of hostname:port pairs defining the zookeeper quorum binding for the registry
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>Enable the node manager to recover after starting</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.dir</name>
-    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
-    <description>
-      The local filesystem directory in which the node manager will store
-      state when recovery is enabled.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
-    <value>10000</value>
-    <description>Time interval between each attempt to connect to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
-    <value>60000</value>
-    <description>Max time to wait to establish a connection to NM</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM to recover state after starting.
-      If true, then yarn.resourcemanager.store.class must be specified.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
-    <value>true</value>
-    <description>
-      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
-    </description>
-    <display-name>Enable Work Preserving Restart</display-name>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.store.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-    <description>
-      The class to use as the persistent store.
-      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-      the store is implicitly fenced, meaning only a single ResourceManager
-      is able to use the store at any point in time.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
-    <description>
-      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
-    <value>/rmstore</value>
-    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-acl</name>
-    <value>world:anyone:rwcda</value>
-    <description>ACL's to be used for ZooKeeper znodes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
-    <value>10000</value>
-    <description>Set the amount of time RM waits before allocating new containers on work-preserving recovery. Such a wait period gives the RM a chance to settle down, resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
-    <value>30000</value>
-    <description>How often to try connecting to the ResourceManager.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.max-wait.ms</name>
-    <value>900000</value>
-    <description>Maximum time to wait to establish connection to ResourceManager</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
-    <value>1000</value>
-    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
-      When HA is enabled, the value here is NOT used. It is generated
-      automatically from yarn.resourcemanager.zk-timeout-ms and
-      yarn.resourcemanager.zk-num-retries."
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-num-retries</name>
-    <value>1000</value>
-    <description>Number of times RM tries to connect to ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-timeout-ms</name>
-    <value>10000</value>
-    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
-    <value>${yarn.resourcemanager.max-completed-applications}</value>
-    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries, i.e. (t0, n0), (t1, n1), ...; the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.uri</name>
-    <value> </value>
-    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.ha.enabled</name>
-    <value>false</value>
-    <description>enable RM HA or not</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
-    <description>Pre-requisite to use CGroups</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
-    <value>hadoop-yarn</value>
-    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
-    <value>false</value>
-    <description>If true, YARN will automount the CGroup; however, the directory needs to already exist. Otherwise, the cgroup should be mounted by the admin.</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
-    <value>/cgroup</value>
-    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
-    <value>false</value>
-    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.cpu-vcores</name>
-    <value>8</value>
-    <description>Number of vcores that can be allocated
-      for containers. This is used by the RM scheduler when allocating
-      resources for containers. This is not used to limit the number of
-      CPUs used by YARN containers. If it is set to -1 and
-      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
-      automatically determined from the hardware in case of Windows and Linux.
-      In other cases, the number of vcores is 8 by default.
-    </description>
-    <display-name>Number of virtual cores</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>32</maximum>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-    <value>80</value>
-    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
-    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>100</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>
-      Retry policy used for FileSystem node label store. The policy is
-      specified by N pairs of sleep-time in milliseconds and number-of-retries
-      &quot;s1,n1,s2,n2,...&quot;.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
-    <value>1000</value>
-    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>90</value>
-    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-    <value>-1</value>
-    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling interval that can be set is 3600 seconds.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
-    <value>false</value>
-    <description>
-      This configuration is for debug and test purposes.
-      By setting this configuration to true,
-      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
-    <value>30</value>
-    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
-    <value>10</value>
-    <description>Number of worker threads that send the yarn system metrics data.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.max-retries</name>
-    <value>30</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.client.retry-interval-ms</name>
-    <value>1000</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-    <description>
-      Enable age off of timeline store data.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.state-store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
-    <description>Store class name for timeline state store.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-state-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb state store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/hadoop/yarn/timeline</value>
-    <description>Store file name for leveldb timeline store.</description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
-    <value>104857600</value>
-    <description>
-      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
-    <value>10000</value>
-    <description>
-      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.type</name>
-    <value>simple</value>
-    <description>
-      Defines authentication used for the Timeline Server HTTP endpoint.
-      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description/>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
-    <value>false</value>
-    <description>
-      Flag to enable override of the default kerberos authentication filter with
-      the RM authentication filter to allow authentication using delegation
-      tokens (fallback to kerberos if the tokens are missing).
-      Only applicable when the http authentication type is kerberos.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.bind-host</name>
-    <value>0.0.0.0</value>
-    <description>Default value is 0.0.0.0; when this is set, the service will bind on all interfaces.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.fs-store.root-dir</name>
-    <value>/system/yarn/node-labels</value>
-    <description>
-      URI for NodeLabelManager.
-    </description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.minimum-allocation-vcores</name>
-    <value>1</value>
-    <description/>
-    <display-name>Minimum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.scheduler.maximum-allocation-vcores</name>
-    <value>8</value>
-    <description/>
-    <display-name>Maximum Container Size (VCores)</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>8</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.node-labels.enabled</name>
-    <value>false</value>
-    <description>
-      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
-    </description>
-    <display-name>Node Labels</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>core-site</type>
-        <name>hadoop.security.authentication</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-    <depends-on>
-      <property>
-        <type>yarn-env</type>
-        <name>yarn_cgroups_enabled</name>
-      </property>
-      <property>
-        <type>cluster-env</type>
-        <name>user_group</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
-    <description>
-      Enable a set of periodic monitors (specified in
-      yarn.resourcemanager.scheduler.monitor.policies) that affect the
-      scheduler.
-    </description>
-    <value>false</value>
-    <display-name>Pre-emption</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Enabled</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Disabled</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- In HDP 2.3, these properties were deleted:
-  yarn.node-labels.manager-class
-  -->
-
-  <!-- These configs were inherited from HDP 2.3 -->
-  <property>
-    <name>yarn.timeline-service.recovery.enabled</name>
-    <description>
-      Enable timeline server to recover state after starting. If
-      true, then yarn.timeline-service.state-store-class must be specified.
-    </description>
-    <value>true</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description>Whether ACLs are enabled.</description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.authorization-provider</name>
-    <description>YARN authorization provider class.</description>
-    <depends-on>
-      <property>
-        <type>ranger-yarn-plugin-properties</type>
-        <name>ranger-yarn-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>yarn</value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!--ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.version</name>
-    <value>1.5</value>
-    <description>Timeline service version we&#x2019;re currently using.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
-    <description>Main storage class for YARN timeline server.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
-    <value>/ats/active/</value>
-    <description>DFS path to store timeline data of active applications</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
-    <value>/ats/done/</value>
-    <description>DFS path to store timeline data of completed applications</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
-    <value/>
-    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- advanced ats v1.5 properties-->
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
-    <description>Summary storage for ATS v1.5</description>
-    <!-- Use rolling leveldb, advanced -->
-    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
-    <description>
-      Scan interval for ATS v1.5 entity group file system storage reader. This
-      value controls how frequently the reader scans the HDFS active directory
-      for application status.
-    </description>
-    <!-- Default is 60 seconds, advanced -->
-    <value>60</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
-    <description>
-      Scan interval for ATS v1.5 entity group file system storage cleaner. This
-      value controls how frequently the cleaner scans the HDFS done directory
-      for stale application data.
-    </description>
-    <!-- 3600 is default, advanced -->
-    <value>3600</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
-    <description>
-      How long the ATS v1.5 entity group file system storage will keep an
-      application's data in the done directory.
-    </description>
-    <!-- 7 days is default, advanced -->
-    <value>604800</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.4 -->
-  <property>
-    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- These configs were inherited from HDP 2.5 -->
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
-    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
-    <description>The auxiliary service class to use for Spark 2</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
   <property>
     <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
     <description>The auxiliary service classpath to use for Spark</description>
     <on-ambari-upgrade add="false"/>
   </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
   <property>
     <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
     <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
     <description>The auxiliary service classpath to use for Spark 2</description>
     <on-ambari-upgrade add="false"/>
   </property>
-  <property>
-    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
-    <description>Defines how often NMs wake up to upload log files. The default value is -1; by default, logs are uploaded when the application finishes. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling interval that can be set is 3600 seconds.</description>
-    <value>3600</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
-    <value>60000</value>
-    <description>The delay time ms to unregister container metrics after completion.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
-    <value/>
-    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
 </configuration>
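
For context on the templating above: values such as {{stack_root}} are Jinja-style placeholders that Ambari renders from its parameter set before writing yarn-site.xml, while ${hdp.version} is left literal so Hadoop can expand it at runtime. A minimal sketch of that rendering step, assuming a hypothetical params dict (the real values come from Ambari's resource_management library):

    import re

    params = {"stack_root": "/usr/hdp"}  # assumed value, for illustration only

    def render(value, params):
        # Replace each {{name}} with its value from params; ${...} tokens such
        # as ${hdp.version} are left untouched for Hadoop to expand at runtime.
        return re.sub(r"\{\{(\w+)\}\}", lambda m: str(params[m.group(1)]), value)

    print(render("{{stack_root}}/${hdp.version}/spark/aux/*", params))
    # -> /usr/hdp/${hdp.version}/spark/aux/*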

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
deleted file mode 100644
index e690204..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
+++ /dev/null
@@ -1,278 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
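
The descriptor above leans on two substitution forms: ${config-type/property} references (e.g. ${yarn-env/yarn_user}) resolve against cluster configs, while _HOST and ${realm} in principal values are expanded per host at kerberization time. A rough sketch of that expansion, with assumed sample values (not Ambari's actual resolver):

    import re

    configs = {"yarn-env": {"yarn_user": "yarn"}}  # assumed cluster configs
    variables = {"realm": "EXAMPLE.COM"}           # assumed KDC realm
    hostname = "nm1.example.com"                   # assumed component host

    def resolve(template):
        # ${type/name} -> configuration lookup; ${name} -> variable lookup
        def sub(m):
            token = m.group(1)
            if "/" in token:
                ctype, prop = token.split("/", 1)
                return configs[ctype][prop]
            return variables[token]
        return re.sub(r"\$\{([^}]+)\}", sub, template).replace("_HOST", hostname)

    print(resolve("nm/_HOST@${realm}"))      # nm/nm1.example.com@EXAMPLE.COM
    print(resolve("${yarn-env/yarn_user}"))  # yarn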

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
index 7e1fd78..a3a8ae9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
@@ -20,63 +20,8 @@
   <services>
     <service>
       <name>YARN</name>
-      <displayName>YARN</displayName>
-      <version>2.7.1.3.0</version>
-      <extends>common-services/YARN/2.1.0.2.0</extends>
-
-      <components>
-        <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>SPARK/SPARK_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-        </component>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-
-          <dependencies>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <configuration-dependencies>
-            <config-type>capacity-scheduler</config-type>
-          </configuration-dependencies>
-        </component>
-      </components>
+      <version>3.0.0.3.0</version>
+      <extends>common-services/YARN/3.0.0</extends>
 
       <osSpecifics>
         <osSpecific>
@@ -105,34 +50,12 @@
           </packages>
         </osSpecific>
       </osSpecifics>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>yarn-env</config-type>
-        <config-type>core-site</config-type>
-        <config-type>yarn-log4j</config-type>
-      </configuration-dependencies>
     </service>
 
     <service>
       <name>MAPREDUCE2</name>
       <displayName>MapReduce2</displayName>
       <version>2.7.1.3.0</version>
-      <configuration-dir>configuration-mapred</configuration-dir>
 
       <osSpecifics>
         <osSpecific>
@@ -153,21 +76,6 @@
         </osSpecific>
       </osSpecifics>
 
-      <themes-dir>themes-mapred</themes-dir>
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
     </service>
   </services>
 </metainfo>
\ No newline at end of file
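
The metainfo change above works because of stack inheritance: with <extends>common-services/YARN/3.0.0</extends>, the stack-level definition only re-declares what differs (here, the version), and components, themes, and quicklinks fall through to the common-services parent. Conceptually the merge is an overlay, sketched below with hypothetical dicts (Ambari's real merge logic is richer):

    def merge_service(parent, child):
        # Child keys win; anything the child leaves out is inherited.
        merged = dict(parent)
        merged.update({k: v for k, v in child.items() if v is not None})
        return merged

    parent = {"version": "3.0.0",
              "components": ["RESOURCEMANAGER", "NODEMANAGER", "APP_TIMELINE_SERVER"],
              "themes": ["theme.json"]}
    child = {"version": "3.0.0.3.0", "components": None, "themes": None}

    print(merge_service(parent, child))
    # version comes from the child; components and themes from common-services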

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
deleted file mode 100644
index 5ffbc07..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"mapreduce.jobhistory.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"mapred-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "jobhistory_ui",
-        "label": "JobHistory UI",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name": "jobhistory_logs",
-        "label": "JobHistory logs",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"jobhistory_jmx",
-        "label":"JobHistory JMX",
-        "requires_user_name":"false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      },
-      {
-        "name":"thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "HISTORYSERVER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "mapreduce.jobhistory.webapp.address",
-          "http_default_port": "19888",
-          "https_property": "mapreduce.jobhistory.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "mapred-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d845449a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
deleted file mode 100644
index 37248d0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"yarn.http.policy",
-          "desired":"HTTPS_ONLY",
-          "site":"yarn-site"
-        }
-      ]
-    },
-
-    "links": [
-      {
-        "name": "resourcemanager_ui",
-        "label": "ResourceManager UI",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_logs",
-        "label": "ResourceManager logs",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url": "%@://%@:%@/logs",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "resourcemanager_jmx",
-        "label":"ResourceManager JMX",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/jmx",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      },
-      {
-        "name": "thread_stacks",
-        "label":"Thread Stacks",
-        "requires_user_name": "false",
-        "component_name": "RESOURCEMANAGER",
-        "url":"%@://%@:%@/stacks",
-        "port":{
-          "http_property": "yarn.resourcemanager.webapp.address",
-          "http_default_port": "8088",
-          "https_property": "yarn.resourcemanager.webapp.https.address",
-          "https_default_port": "8090",
-          "regex": "\\w*:(\\d+)",
-          "site": "yarn-site"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
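
Both quicklinks files follow the same pattern: the protocol check selects http or https, the matching *_property is read from the site config, the port is pulled out with the "\w*:(\d+)" regex, and the pieces are spliced into the "%@://%@:%@" URL template. A small sketch with assumed property values (Ambari's real quicklinks code differs):

    import re

    yarn_site = {"yarn.http.policy": "HTTPS_ONLY",
                 "yarn.resourcemanager.webapp.https.address": "rm1.example.com:8090"}
    host = "rm1.example.com"  # assumed; derived from the component host in practice

    protocol = "https" if yarn_site.get("yarn.http.policy") == "HTTPS_ONLY" else "http"
    prop = ("yarn.resourcemanager.webapp.https.address" if protocol == "https"
            else "yarn.resourcemanager.webapp.address")
    default_port = "8090" if protocol == "https" else "8088"

    match = re.search(r"\w*:(\d+)", yarn_site.get(prop, ""))
    port = match.group(1) if match else default_port

    print("%s://%s:%s" % (protocol, host, port))  # https://rm1.example.com:8090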