Posted to commits@ambari.apache.org by sc...@apache.org on 2015/06/03 15:38:46 UTC

[3/7] ambari git commit: AMBARI-10446 Remove 2.2.GlusterFS stack

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider.py
deleted file mode 100644
index 48c534e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-
-def slider():
-  import params
-
-  Directory(params.slider_conf_dir,
-            recursive=True
-  )
-
-  slider_client_config = params.config['configurations']['slider-client'] if 'configurations' in params.config and 'slider-client' in params.config['configurations'] else {}
-
-  XmlConfig("slider-client.xml",
-            conf_dir=params.slider_conf_dir,
-            configurations=slider_client_config
-  )
-
-  File(format("{slider_conf_dir}/slider-env.sh"),
-       mode=0755,
-       content=InlineTemplate(params.slider_env_sh_template)
-  )
-
-  Directory(params.storm_slider_conf_dir,
-            recursive=True
-  )
-
-  File(format("{storm_slider_conf_dir}/storm-slider-env.sh"),
-       mode=0755,
-       content=Template('storm-slider-env.sh.j2')
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.slider_conf_dir}/log4j.properties"),
-         mode=0644,
-         content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.slider_conf_dir}/log4j.properties"))):
-    File(format("{params.slider_conf_dir}/log4j.properties"),
-         mode=0644
-    )
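
For context: the removed slider() above uses Ambari's resource_management DSL, which declares idempotent resources (Directory, XmlConfig, File) rather than running imperative steps. The chained 'in' checks that build slider_client_config can be written more compactly with dict.get(); a minimal sketch, using a hypothetical config dict in place of params.config:

    # Stand-in for params.config, shaped like an Ambari command JSON.
    config = {'configurations': {'slider-client': {'slider.zookeeper.quorum': 'localhost:2181'}}}

    # Equivalent to the guarded lookup in the removed slider.py.
    slider_client_config = config.get('configurations', {}).get('slider-client', {})
    print(slider_client_config)  # -> {'slider.zookeeper.quorum': 'localhost:2181'}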

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider_client.py b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider_client.py
deleted file mode 100644
index cb22a99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/scripts/slider_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from slider import slider
-
-
-class SliderClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    slider()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  SliderClient().execute()
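
The Script subclass above is Ambari's dispatch pattern: execute() resolves the requested command (install, configure, status, ...) and calls the matching method on the component class. A stripped-down sketch of that dispatch, with a stand-in base class rather than the real resource_management Script:

    import sys

    class Script(object):
        # Simplified stand-in: look up the command name passed on the
        # command line and invoke the method of the same name.
        def execute(self):
            command = sys.argv[1] if len(sys.argv) > 1 else 'status'
            getattr(self, command)(env=None)

    class SliderClient(Script):
        def install(self, env):
            print('install packages, then configure')
            self.configure(env)

        def configure(self, env):
            print('write slider-client.xml, slider-env.sh, log4j.properties')

        def status(self, env):
            raise Exception('ClientComponentHasNoStatus')

    if __name__ == '__main__':
        SliderClient().execute()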

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/templates/storm-slider-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/templates/storm-slider-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/templates/storm-slider-env.sh.j2
deleted file mode 100644
index 8022a4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SLIDER/package/templates/storm-slider-env.sh.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-#!/usr/bin/env bash
-# -*- coding: utf-8 -*-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-export JAVA_HOME={{java64_home}}
-export SLIDER_HOME={{slider_home_dir}}
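
Templates such as storm-slider-env.sh.j2 are rendered with values from params before being written out. A minimal sketch of that substitution using the jinja2 library directly (Ambari wraps this in its Template resource; the example paths are illustrative placeholders):

    from jinja2 import Template  # third-party: pip install jinja2

    template = Template(
        'export JAVA_HOME={{java64_home}}\n'
        'export SLIDER_HOME={{slider_home_dir}}\n'
    )
    # Ambari would supply these from params; values here are illustrative.
    print(template.render(java64_home='/usr/jdk64/jdk1.7.0_67',
                          slider_home_dir='/usr/hdp/current/slider-client'))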

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SQOOP/metainfo.xml
deleted file mode 100644
index efb05cd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <version>1.4.5.2.2.0.0</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>sqoop_2_2_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>sqoop-2-2-.*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>
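
The metainfo.xml above maps OS families to package name patterns; the server chooses the osSpecific block matching a host's family when telling the agent what to install. A small sketch of that selection over an XML string of the same shape, using only the standard library (Ambari's real matching lives in the server; this only illustrates the mapping):

    import xml.etree.ElementTree as ET

    METAINFO = '''<metainfo><services><service><name>SQOOP</name><osSpecifics>
      <osSpecific><osFamily>any</osFamily>
        <packages><package><name>mysql-connector-java</name></package></packages></osSpecific>
      <osSpecific><osFamily>redhat5,redhat6,suse11</osFamily>
        <packages><package><name>sqoop_2_2_*</name></package></packages></osSpecific>
    </osSpecifics></service></services></metainfo>'''

    def packages_for(root, family):
        # Collect package names whose osFamily list contains `family` or 'any'.
        names = []
        for spec in root.iter('osSpecific'):
            families = spec.findtext('osFamily', '').split(',')
            if family in families or 'any' in families:
                names.extend(p.findtext('name') for p in spec.iter('package'))
        return names

    print(packages_for(ET.fromstring(METAINFO), 'redhat6'))
    # -> ['mysql-connector-java', 'sqoop_2_2_*']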

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/configuration/tez-site.xml
deleted file mode 100644
index 50345fd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/configuration/tez-site.xml
+++ /dev/null
@@ -1,311 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration supports_final="true" supports_do_not_extend="true">
-
-  <property>
-    <name>tez.lib.uris</name>
-    <value>/hdp/apps/${hdp.version}/tez/tez.tar.gz</value>
-    <description>Comma-delimited list of the location of the Tez libraries which will be localized for DAGs.
-      Specifying a single .tar.gz or .tgz assumes that a compressed version of the tez libs is being used. This is uncompressed into a tezlibs directory when running containers, and tezlibs/;tezlibs/lib/ are added to the classpath (after . and .*).
-      If multiple files are specified - files are localized as regular files, contents of directories are localized as regular files (non-recursive).
-    </description>
-  </property>
-
-  <property>
-    <name>tez.cluster.additional.classpath.prefix</name>
-    <value>/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>tez.am.log.level</name>
-    <value>INFO</value>
-    <description>Root Logging level passed to the Tez app master</description>
-  </property>
-
-  <property>
-    <name>tez.generate.debug.artifacts</name>
-    <value>false</value>
-    <description>Generate debug artifacts such as a text representation of the submitted DAG plan</description>
-  </property>
-
-  <property>
-    <name>tez.staging-dir</name>
-    <value>/tmp/${user.name}/staging</value>
-    <description>The staging dir used while submitting DAGs</description>
-  </property>
-
-  <property>
-    <name>tez.am.resource.memory.mb</name>
-    <value>1536</value>
-    <description>The amount of memory to be used by the AppMaster.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.launch.cmd-opts</name>
-    <value>-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC</value>
-    <description>Java options for the Tez AppMaster process. The Xmx value is derived based on tez.am.resource.memory.mb and is 80% of the value by default.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.launch.cluster-default.cmd-opts</name>
-    <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description>Cluster default Java options for the Tez AppMaster process. These will be prepended to the properties specified via tez.am.launch.cmd-opts</description>
-  </property>
-
-  <property>
-    <name>tez.am.launch.env</name>
-    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-        Additional execution environment entries for tez. This is not an additive property. You must preserve the original value if
-        you want to have access to native libraries.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.resource.memory.mb</name>
-    <value>1536</value>
-    <description>The amount of memory to be used by launched tasks.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.launch.cmd-opts</name>
-    <value>-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC</value>
-    <description>Java options for tasks. The Xmx value is derived based on tez.task.resource.memory.mb and is 80% of this value by default.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.launch.cluster-default.cmd-opts</name>
-    <value>-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description>Cluster default Java options for tasks. These will be prepended to the properties specified via tez.task.launch.cmd-opts</description>
-  </property>
-
-  <property>
-    <name>tez.task.launch.env</name>
-    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for tez. This is not an additive property. You must preserve the original value if
-      you want to have access to native libraries.
-      Used only if the value is not specified explicitly by the DAG definition.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.shuffle-vertex-manager.min-src-fraction</name>
-    <value>0.2</value>
-    <description>In case of a ScatterGather connection, the fraction of source tasks which should
-      complete before tasks for the current vertex are scheduled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.shuffle-vertex-manager.max-src-fraction</name>
-    <value>0.4</value>
-    <description>In case of a ScatterGather connection, once this fraction of source tasks have
-      completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
-      scheduling on the current vertex scales linearly between min-fraction and max-fraction
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
-    <value>250</value>
-    <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-  </property>
-
-  <property>
-    <name>tez.grouping.split-waves</name>
-    <value>1.7</value>
-    <description>The multiplier for available queue capacity when determining number of tasks for
-      a Vertex. 1.7 with 100% queue available implies generating a number of tasks roughly equal
-      to 170% of the available containers on the queue
-    </description>
-  </property>
-
-  <property>
-    <name>tez.grouping.min-size</name>
-    <value>16777216</value>
-    <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
-      too many splits
-    </description>
-  </property>
-
-  <property>
-    <name>tez.grouping.max-size</name>
-    <value>1073741824</value>
-    <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
-      an excessively large split
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.enabled</name>
-    <value>true</value>
-    <description>Configuration to specify whether containers should be reused</description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.rack-fallback.enabled</name>
-    <value>true</value>
-    <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.non-local-fallback.enabled</name>
-    <value>false</value>
-    <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.idle.release-timeout-min.millis</name>
-    <value>10000</value>
-    <description>The minimum amount of time to hold on to a container that is idle. Only active when reuse is enabled.</description>
-  </property>
-
-  <property>
-    <name>tez.am.container.idle.release-timeout-max.millis</name>
-    <value>20000</value>
-    <description>The maximum amount of time to hold on to a container if no task can be assigned to it immediately. Only active when reuse is enabled.</description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
-    <value>250</value>
-    <description>The amount of time to wait before assigning a container to the next level of
-      locality. NODE -> RACK -> NON_LOCAL
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.max.app.attempts</name>
-    <value>2</value>
-    <description>Specifies the total number of times the app master will run in case recovery is triggered</description>
-  </property>
-
-  <property>
-    <name>tez.am.maxtaskfailures.per.node</name>
-    <value>10</value>
-    <description>The maximum number of allowed task attempt failures on a node before
-      it gets marked as blacklisted
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.am.heartbeat.counter.interval-ms.max</name>
-    <value>4000</value>
-    <description>Time interval at which task counters are sent to the AM</description>
-  </property>
-
-  <property>
-    <name>tez.task.get-task.sleep.interval-ms.max</name>
-    <value>200</value>
-    <description>The maximum amount of time, in milliseconds, to wait before a task asks the AM for
-      another task
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.max-events-per-heartbeat</name>
-    <value>500</value>
-    <description>Maximum number of events to fetch from the AM by the tasks in a single heartbeat.</description>
-  </property>
-
-  <property>
-    <name>tez.session.client.timeout.secs</name>
-    <value>-1</value>
-    <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
-      the client
-    </description>
-  </property>
-
-  <property>
-    <name>tez.session.am.dag.submit.timeout.secs</name>
-    <value>300</value>
-    <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
-      before shutting down
-    </description>
-  </property>
-
-  <property>
-    <name>tez.counters.max</name>
-    <value>2000</value>
-    <description>The number of allowed counters for the executing DAG</description>
-  </property>
-
-  <property>
-    <name>tez.counters.max.groups</name>
-    <value>1000</value>
-    <description>The number of allowed counter groups for the executing DAG</description>
-  </property>
-
-
-  <!-- Configuration for runtime components -->
-
-  <!-- These properties can be set on a per edge basis by configuring the payload for each
-       edge independently. -->
-
-
-  <property>
-    <name>tez.runtime.compress</name>
-    <value>true</value>
-    <description>Whether intermediate data should be compressed or not</description>
-  </property>
-
-  <property>
-    <name>tez.runtime.compress.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>The codec to be used when compressing intermediate data. Only
-      applicable if tez.runtime.compress is enabled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.runtime.io.sort.mb</name>
-    <value>512</value>
-    <description>The size of the sort buffer when output needs to be sorted</description>
-  </property>
-
-  <property>
-    <name>tez.runtime.unordered.output.buffer.size-mb</name>
-    <value>100</value>
-    <description>The size of the buffer when output does not require to be sorted</description>
-  </property>
-
-  <property>
-    <name>tez.history.logging.service.class</name>
-    <value>org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService</value>
-    <description>The class to be used for logging history data.
-      Set to org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService to log to ATS
-      Set to org.apache.tez.dag.history.logging.impl.SimpleHistoryLoggingService to log to the filesystem specified by ${fs.defaultFS}
-    </description>
-  </property>
-
-</configuration>
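
Several descriptions above note that the JVM Xmx for the AM and for tasks is derived as 80% of the corresponding *.resource.memory.mb when not set explicitly in cmd-opts. With the stack default of 1536 MB that works out as follows (a worked example, not Tez's actual code):

    def derived_xmx_mb(container_mb, fraction=0.8):
        # Tez derives Xmx as a fraction (80% by default) of the container size.
        return int(container_mb * fraction)

    print(derived_xmx_mb(1536))  # -> 1228, i.e. roughly -Xmx1228m in a 1536 MB container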

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/metainfo.xml
deleted file mode 100644
index 5e51f6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/TEZ/metainfo.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>TEZ</name>
-      <displayName>Tez</displayName>
-      <version>0.5.2.2.2.0.0</version>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>tez_2_2_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>tez-2-2-.*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-    </service>
-  </services>
-</metainfo>
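
Note the two pattern styles above: tez_2_2_* is a shell-style glob (for the yum/zypper families) while tez-2-2-.* is a regular expression (for apt on ubuntu12). A sketch of the difference (illustrative only; Ambari's package providers do the real matching, and the candidate names are made up):

    import fnmatch
    import re

    candidates = ['tez_2_2_0_0_2041', 'tez-2-2-0-0-2041', 'hive_2_2_0_0_2041']

    print([p for p in candidates if fnmatch.fnmatch(p, 'tez_2_2_*')])  # glob style
    print([p for p in candidates if re.match(r'tez-2-2-.*', p)])       # regex style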

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-env.xml
deleted file mode 100644
index b5280ce..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-env.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
-  
-  <!-- mapred-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for mapred-env.sh file</description>
-    <value>
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
-export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
deleted file mode 100644
index 5856e02..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>/usr/hdp/${hdp.version}/hadoop/lib:/etc/hadoop/conf:/usr/lib/hadoop/*:/usr/lib/hadoop/lib/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.application.framework.path</name>
-    <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Dhdp.version=${hdp.version}</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
-    <value>1</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
-    <value>1000</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
-    <value>30000</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.emit-timeline-data</name>
-    <value>false</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.bind-host</name>
-    <value>0.0.0.0</value>
-    <description></description>
-  </property>
-
-
-  <!--glusterfs properties -->
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>glusterfs:///mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>glusterfs:///mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>glusterfs:///user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>glusterfs:///mapred/jobstatus</value>
-  </property>
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>glusterfs:///mapred/history/done</value>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>glusterfs:///mapred/system</value>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>glusterfs:///user</value>
-  </property>
-
-</configuration>
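
The glusterfs:/// values above rely on Hadoop's scheme-based FileSystem dispatch: the URI scheme selects the filesystem implementation (the GlusterFS Hadoop plugin, registered via fs.&lt;scheme&gt;.impl in core-site), and the path is then resolved inside that volume. A quick illustration of the scheme/path split with the standard library:

    from urllib.parse import urlparse

    uri = urlparse('glusterfs:///mr-history/done')
    # Hadoop looks up the FileSystem class registered for uri.scheme;
    # the plugin maps uri.path onto the mounted GlusterFS volume.
    print(uri.scheme, uri.path)  # -> glusterfs /mr-history/done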

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
deleted file mode 100644
index 4513fdd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-client.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-    <property>
-        <name>ssl.client.truststore.location</name>
-        <value>/etc/security/clientKeys/all.jks</value>
-        <description>Location of the trust store file.</description>
-    </property>
-    <property>
-        <name>ssl.client.truststore.type</name>
-        <value>jks</value>
-        <description>Optional. Default value is "jks".</description>
-    </property>
-    <property>
-        <name>ssl.client.truststore.password</name>
-        <value>bigdata</value>
-        <property-type>PASSWORD</property-type>
-        <description>Password to open the trust store file.</description>
-    </property>
-    <property>
-        <name>ssl.client.truststore.reload.interval</name>
-        <value>10000</value>
-        <description>Truststore reload interval, in milliseconds.</description>
-    </property>
-    <property>
-        <name>ssl.client.keystore.type</name>
-        <value>jks</value>
-        <description>Optional. Default value is "jks".</description>
-    </property>
-    <property>
-        <name>ssl.client.keystore.location</name>
-        <value>/etc/security/clientKeys/keystore.jks</value>
-        <description>Location of the keystore file.</description>
-    </property>
-    <property>
-        <name>ssl.client.keystore.password</name>
-        <value>bigdata</value>
-        <property-type>PASSWORD</property-type>
-        <description>Password to open the keystore file.</description>
-    </property>
-</configuration>
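
Both stores above ship with the placeholder password "bigdata" (typed as PASSWORD so Ambari masks it in the UI); deployments are expected to replace it. A small sanity-check sketch over a file with the <property> layout above (hypothetical helper, not part of Ambari):

    import xml.etree.ElementTree as ET

    def check_ssl_config(path):
        # Flag placeholder passwords and list the configured store locations.
        for prop in ET.parse(path).getroot().iter('property'):
            name = prop.findtext('name', '')
            value = prop.findtext('value', '')
            if name.endswith('.password') and value == 'bigdata':
                print('WARNING: %s still uses the default password' % name)
            elif name.endswith('.location'):
                print('%s -> %s' % (name, value))

    # Example: check_ssl_config('ssl-client.xml')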

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
deleted file mode 100644
index 97cebd6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration-mapred/ssl-server.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-    <property>
-        <name>ssl.server.truststore.location</name>
-        <value>/etc/security/serverKeys/all.jks</value>
-        <description>Location of the trust store file.</description>
-    </property>
-    <property>
-        <name>ssl.server.truststore.type</name>
-        <value>jks</value>
-        <description>Optional. Default value is "jks".</description>
-    </property>
-    <property>
-        <name>ssl.server.truststore.password</name>
-        <value>bigdata</value>
-        <property-type>PASSWORD</property-type>
-        <description>Password to open the trust store file.</description>
-    </property>
-    <property>
-        <name>ssl.server.truststore.reload.interval</name>
-        <value>10000</value>
-        <description>Truststore reload interval, in milliseconds.</description>
-    </property>
-    <property>
-        <name>ssl.server.keystore.type</name>
-        <value>jks</value>
-        <description>Optional. Default value is "jks".</description>
-    </property>
-    <property>
-        <name>ssl.server.keystore.location</name>
-        <value>/etc/security/serverKeys/keystore.jks</value>
-        <description>Location of the keystore file.</description>
-    </property>
-    <property>
-        <name>ssl.server.keystore.password</name>
-        <value>bigdata</value>
-        <property-type>PASSWORD</property-type>
-        <description>Password to open the keystore file.</description>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index cf8242b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,131 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e., it controls the number of concurrently running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' capacities should add up to their parent queue's capacity
-      or less.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to the number of nodes in the cluster. By default it is set
-      to approximately the number of nodes in one rack, which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>
-      Default minimum queue resource limit depends on the number of users who have submitted applications.
-    </description>
-  </property>
-
-</configuration>
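
The root.capacity description above states the invariant the CapacityScheduler enforces: the capacities of a parent's child queues must add up to at most the parent's 100. A tiny validation sketch over a flat dict of these properties (hypothetical helper, not scheduler code):

    def validate_children(props, parent='root'):
        # Sum the declared capacities of the parent's direct child queues.
        children = props.get('yarn.scheduler.capacity.%s.queues' % parent, '')
        total = sum(float(props['yarn.scheduler.capacity.%s.%s.capacity' % (parent, c)])
                    for c in children.split(',') if c)
        assert total <= 100, 'children of %s claim %s%%' % (parent, total)
        return total

    props = {'yarn.scheduler.capacity.root.queues': 'default',
             'yarn.scheduler.capacity.root.default.capacity': '100'}
    print(validate_children(props))  # -> 100.0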

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/mapred-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/mapred-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/mapred-site.xml.2
deleted file mode 100644
index 6abb71d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/mapred-site.xml.2
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-
-<!-- GLUSTERFS properties -->
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>glusterfs:///mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>glusterfs:///mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>glusterfs:///user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>glusterfs:///mapred/jobstatus</value>
-  </property>
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>glusterfs:///mapred/history/done</value>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>glusterfs:///mapred/system</value>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>glusterfs:///user</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 7731a00..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,565 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-    <description>The address of the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>false</value>
-    <description>Whether ACLs are enabled.</description>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value></value>
-    <description> ACL of who can be admin of the YARN cluster. </description>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*,/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.GlusterContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and
-      cannot start with a number</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    <description>The auxiliary service class to use </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_{$contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, at which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>Location to aggregate logs to. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      ${yarn.nodemanager.remote-app-log-dir}/${user}/${thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      TFile compression type used to compress aggregated logs.
-    </description>
-  </property>
-
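-  <!-- Taken together, the aggregation settings above mean logs for a finished
-       application land under
-       ${yarn.nodemanager.remote-app-log-dir}/${user}/logs/application_${appid}
-       as gz-compressed files, retrievable with the standard CLI, e.g.
-       "yarn logs -applicationId application_1400000000000_0001" (the
-       application id here is only illustrative). -->
-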
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose YARN application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager for the change to take effect.
-
-      The roots of YARN applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see above), and the roots
-      of YARN applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also above).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long, in seconds, to keep aggregated logs before deleting them.
-      -1 disables deletion. Be careful: setting this too small will spam
-      the NameNode.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs: if fewer
-      healthy local-dirs (or log-dirs) are available than this fraction
-      requires, new containers will not be launched on this node. For
-      example, with four local-dirs and the value 0.25, at least one dir
-      must stay healthy.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for the AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-  </property>
-
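-  <!-- The exclude file is expected to list one hostname per line; after
-       editing it, "yarn rmadmin -refreshNodes" makes the ResourceManager
-       re-read it. -->
-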
-  <property>
-    <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
-    <description>Indicates to clients whether the timeline service is enabled.
-      If enabled, clients will publish entities and events to the timeline server.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.store-class</name>
-    <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
-    <description>
-      Store class name for timeline store
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.generic-application-history.store-class</name>
-    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
-    <description>
-      Store class name for the application history store.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
-    <value>/mnt/glusterfs/hadoop/yarn/timeline</value>
-    <description>
-      Path where the LevelDB timeline store keeps its files.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.webapp.address</name>
-    <value>0.0.0.0:8188</value>
-    <description>
-      The http address of the timeline service web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.webapp.https.address</name>
-    <value>0.0.0.0:8190</value>
-    <description>
-      The https address of the timeline service web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.timeline-service.address</name>
-    <value>0.0.0.0:10200</value>
-    <description>
-      The default address on which the timeline server starts
-      its RPC server.
-    </description>
-  </property>
-  <property>
-    <description>Enable age off of timeline store data.</description>
-    <name>yarn.timeline-service.ttl-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <description>Time to live for timeline store data in milliseconds.</description>
-    <name>yarn.timeline-service.ttl-ms</name>
-    <value>2678400000</value>
-  </property>
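-  <!-- For reference: 2678400000 ms is 31 days, so timeline data older than
-       roughly a month is aged off while ttl-enable is true. -->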
-  <property>
-    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
-    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
-    <value>300000</value>
-  </property>
-
-  <!-- Recovery, HA, and CGroups properties -->
-  <property>
-    <name>yarn.nodemanager.recovery.enabled</name>
-    <value>false</value>
-    <description>Enable the node manager to recover after starting</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.recovery.dir</name>
-    <value>/var/log/hadoop-yarn/nodemanager/recovery-state</value>
-    <description>
-      The local filesystem directory in which the node manager will store
-      state when recovery is enabled.
-    </description>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
-    <value>10000</value>
-    <description>Time interval, in milliseconds, between each attempt to connect to the NM</description>
-  </property>
-  <property>
-    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
-    <value>60000</value>
-    <description>Maximum time, in milliseconds, to wait to establish a connection to the NM</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.recovery.enabled</name>
-    <value>false</value>
-    <description>
-      Enable RM to recover state after starting.
-      If true, then yarn.resourcemanager.store.class must be specified.
-    </description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
-    <value>false</value>
-    <description>
-      Enable RM work-preserving recovery. This configuration is private to YARN for experimenting with the feature.
-    </description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.store.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
-    <description>
-      The class to use as the persistent store.
-      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
-      the store is implicitly fenced, meaning that only a single ResourceManager
-      is able to use the store at any point in time.
-    </description>
-  </property>
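-  <!-- Sketch of how these recovery properties fit together: turning
-       yarn.resourcemanager.recovery.enabled to true with the ZKRMStateStore
-       above requires yarn.resourcemanager.zk-address (below) to point at a
-       reachable ZooKeeper quorum, e.g. "zk1:2181,zk2:2181,zk3:2181"
-       (hostnames illustrative). -->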
-  <property>
-    <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
-    <description>
-      Comma-separated list of host:port pairs for the ZooKeeper servers used by the RM,
-      e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". If the optional chroot suffix
-      is used, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a", the client is
-      rooted at "/app/a" and all paths are relative to that root, so getting or setting
-      "/foo/bar" results in operations on "/app/a/foo/bar" (from the server's perspective).
-    </description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
-    <value>/rmstore</value>
-    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-acl</name>
-    <value>world:anyone:rwcda</value>
-    <description>ACLs to be used for ZooKeeper znodes.</description>
-  </property>
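-  <!-- ZooKeeper ACL format is scheme:id:permissions; "world:anyone:rwcda"
-       grants read, write, create, delete, and admin to everyone, which is
-       only suitable for unsecured clusters. -->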
-  <property>
-    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
-    <value>10000</value>
-    <description>Set the amount of time the RM waits before allocating new containers on work-preserving recovery. This wait period gives the RM a chance to settle down while resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
-    <value>30000</value>
-    <description>How often to try connecting to the ResourceManager.</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.connect.max-wait.ms</name>
-    <value>900000</value>
-    <description>Maximum time to wait to establish a connection to the ResourceManager</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
-    <value>1000</value>
-    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
-      When HA is enabled, the value here is NOT used. It is generated
-      automatically from yarn.resourcemanager.zk-timeout-ms and
-      yarn.resourcemanager.zk-num-retries."
-    </description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-num-retries</name>
-    <value>1000</value>
-    <description>Number of times RM tries to connect to ZooKeeper.</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.zk-timeout-ms</name>
-    <value>10000</value>
-    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expiration happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
-    <value>${yarn.resourcemanager.max-completed-applications}</value>
-    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
-    <value>2000, 500</value>
-    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries as (t0, n0), (t1, n1), ...: the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on. For example, "2000, 500" means up to 500 retries sleeping about 2000 milliseconds each.</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.fs.state-store.uri</name>
-    <value> </value>
-    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class</description>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.ha.enabled</name>
-    <value>false</value>
-    <description>Enable RM HA or not.</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
-    <description>Prerequisite for using CGroups</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
-    <value>hadoop-yarn</value>
-    <description>Name of the CGroups hierarchy under which all YARN jobs will be launched</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
-    <value>false</value>
-    <description>If true, YARN will automatically mount the CGroup (the directory needs to already exist); otherwise, the CGroup should be mounted by the admin</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
-    <value>false</value>
-    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.cpu-vcores</name>
-    <value>8</value>
-    <description>Number of virtual CPU cores that can be allocated for containers.</description>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
-    <value>100</value>
-    <description>The percentage of physical CPU that may be allocated to YARN containers; only effective when used with CGroups</description>
-  </property>
-    
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/627401f6/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/metainfo.xml
deleted file mode 100644
index 87cb7f2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2.GlusterFS/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.6.0.2.2.0.0</version>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_2_2_*-yarn</name>
-            </package>
-            <package>
-              <name>hadoop_2_2_*-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-2-2-.*-yarn</name>
-            </package>
-            <package>
-              <name>hadoop-2-2-.*-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>      
-
-      <requiredServices>
-        <service>GLUSTERFS</service>
-      </requiredServices>
-
-    </service>
-    <service>
-      <name>MAPREDUCE2</name>
-      <displayName>MapReduce2</displayName>
-      <version>2.6.0.2.2.0.0</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>hadoop_2_2_*-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>hadoop-2-2-.*-mapreduce</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dir>configuration-mapred</configuration-dir>   
-    </service>
-
-  </services>
-</metainfo>