Posted to commits@ambari.apache.org by yu...@apache.org on 2014/11/14 03:19:51 UTC

[09/29] ambari git commit: AMBARI-8269. Merge branch-windows-dev changes to trunk. (Jayush Luniya via yusaku)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..3fcb7e7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,239 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+  <!-- MR AM properties -->
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+  </property>
+  <property>
+    <name>mapreduce.job.hdfs-servers</name>
+    <value>${fs.defaultFS}</value>
+  </property>
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+               may be executed in parallel.</description>
+  </property>
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+               may be executed in parallel.</description>
+  </property>
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>Fraction of the number of maps in the job which should be
+      complete before reduces are scheduled for the job.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string. A value of 0 disables the timeout.
+    </description>
+  </property>
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>mapreduce.job.acl-view-job</name>
+    <value>*</value>
+  </property>
+  <!-- i/o properties -->
+  <property>
+    <name>io.sort.mb</name>
+    <value>200</value>
+    <description>No description</description>
+  </property>
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value>0.9</value>
+    <description>No description</description>
+  </property>
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+  <!-- map tasks' properties -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>true</value>
+    <description>Should the outputs of the maps be compressed before being
+               sent across the network. Uses SequenceFile compression.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.map.output.compress.codec</name>
+    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>If the map outputs are compressed, how should they be
+               compressed?
+    </description>
+  </property>
+  <!-- reduce tasks' properties -->
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>The default number of parallel transfers run by reduce
+      during the copy(shuffle) phase.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.reduce.merge.inmem.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+      for the in-memory merge process. When we accumulate this number of
+      files we initiate the in-memory merge and spill to disk. A value of 0
+      or less indicates that there is no threshold, and the merge is instead
+      triggered solely by the ramfs's memory consumption.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.70</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+  <!-- JobHistory Server -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mapred/history/done_intermediate</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mapred/history/done</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.https.address</name>
+    <value>localhost:19888</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.create-intermediate-jh-base-dir</name>
+    <value>false</value>
+  </property>
+  <!-- JobHistory Security Settings -->
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>%HADOOP_CONF_DIR%,%HADOOP_COMMON_HOME%/share/hadoop/common/*,%HADOOP_COMMON_HOME%/share/hadoop/common/lib/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/*,%HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*,%HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/*,%HADOOP_YARN_HOME%/share/hadoop/yarn/lib/*</value>
+    <description>CLASSPATH for MR applications. A comma-separated list
+      of CLASSPATH entries.</description>
+  </property>
+  <property>
+    <name>mapreduce.shuffle.ssl.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>mapreduce.ssl.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.max</name>
+    <value>20000</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.groups.max</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.group.name.max</name>
+    <value>1000</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.counter.name.max</name>
+    <value>1000</value>
+  </property>
+  <property>
+    <name>mapreduce.cluster.local.dir</name>
+    <value>c:\hdpdata\hadoop\local</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.local.dir</name>
+    <value>c:\hdpdata\hdfs\mapred\local</value>
+  </property>
+
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx756m</value>
+  </property>
+
+  <property>
+    <name>mapred.child.tmp</name>
+    <value>c:\hdp\temp\hadoop</value>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+  </property>
+
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>200</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+  </property>
+</configuration>
\ No newline at end of file

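A note on the memory values above: the task heaps (-Xmx756m in mapreduce.map.java.opts and mapreduce.reduce.java.opts) are deliberately smaller than the 1024 MB containers (mapreduce.map.memory.mb and mapreduce.reduce.memory.mb), leaving roughly 25% headroom for non-heap JVM memory so the NodeManager does not kill the task for exceeding its container. A minimal Python sketch of that check, using only values from the file above (the helper name heap_mb is ours, not part of the commit):

import re

# Values taken from the mapred-site.xml above.
container_mb = {"map": 1024, "reduce": 1024}              # mapreduce.{map,reduce}.memory.mb
java_opts    = {"map": "-Xmx756m", "reduce": "-Xmx756m"}  # mapreduce.{map,reduce}.java.opts

def heap_mb(opts):
    """Extract the -Xmx value, in MB, from a JVM options string."""
    m = re.search(r"-Xmx(\d+)m", opts)
    return int(m.group(1)) if m else None

for task, opts in java_opts.items():
    heap = heap_mb(opts)
    print("%s: %d MB heap in a %d MB container (%.0f%%)"
          % (task, heap, container_mb[task], 100.0 * heap / container_mb[task]))
    assert heap < container_mb[task], "heap must fit inside the container"
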
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..c2be95b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,114 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently
+      running applications.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description>
+      The ResourceCalculator implementation to be used to compare
+      Resources in the scheduler.
+      The default, DefaultResourceCalculator, only uses memory, while
+      DominantResourceCalculator uses the dominant resource to compare
+      multi-dimensional resources such as memory and CPU.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default,joblauncher</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>95</value>
+    <description>Default queue target capacity.</description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>10</value>
+    <description>
+      Default queue user limit factor; a single user may consume up to this
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster; by
+      default it is set to approximately the number of nodes in one rack, which is 40.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.joblauncher.capacity</name>
+    <value>5</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.joblauncher.user-limit-factor</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.joblauncher.maximum-capacity</name>
+    <value>50</value>
+  </property>
+</configuration>
\ No newline at end of file

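Two invariants in this file are easy to verify: the capacities of root's child queues (default 95, joblauncher 5) must sum to 100, and maximum-am-resource-percent bounds how much cluster memory ApplicationMasters may hold at once. A short sketch of both; the 10-node cluster is a hypothetical example, with the 5120 MB per node taken from the yarn-site.xml later in this commit:

# Queue capacities from the capacity-scheduler.xml above; the children of a
# parent queue must total 100 percent.
queues = {"default": 95, "joblauncher": 5}
assert sum(queues.values()) == 100, "child queue capacities must total 100"

# yarn.scheduler.capacity.maximum-am-resource-percent = 0.1 caps the memory
# all ApplicationMasters may hold concurrently. Cluster size is hypothetical.
nodes, node_mb, am_percent = 10, 5120, 0.1
cluster_mb = nodes * node_mb
print("AM ceiling: %.0f MB of %d MB cluster memory"
      % (am_percent * cluster_mb, cluster_mb))
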
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..b22bb5a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,188 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>2048</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.pmem-check-enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>true</value>
+  </property>
+  <!-- NodeManager -->
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.webapp.address</name>
+    <value>0.0.0.0:50060</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio of virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of the NodeManager</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log.retain-seconds</name>
+    <value>604800</value>
+  </property>
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>36000</value>
+  </property>
+  <property>
+    <description>Store class name for history store, defaulting to file system store</description>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
+  </property>
+  <!-- Use a directory that is set up on HDFS to store generic history -->
+  <property>
+    <description>URI pointing to the location of the FileSystem path where the history will be persisted. This must be
+      supplied when using org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore as
+      the value for yarn.timeline-service.generic-application-history.store-class
+    </description>
+    <name>yarn.timeline-service.generic-application-history.fs-history-store.uri</name>
+    <value>/yarn/generic-history/</value>
+  </property>
+  <property>
+    <description>T-file compression types used to compress history data.</description>
+    <name>yarn.timeline-service.generic-application-history.fs-history-store.compression-type</name>
+    <value>none</value>
+  </property>
+  <property>
+    <description>Indicate to ResourceManager as well as clients whether
+      history-service is enabled or not. If enabled, ResourceManager starts
+      recording historical data that ApplicationHistory service can consume.
+      Similarly, clients can redirect to the history service when applications
+      finish if this is enabled.
+    </description>
+    <name>yarn.timeline-service.generic-application-history.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <name>yarn.timeline-service.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>c:\hdpdata\hadoop\local</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.https.address</name>
+    <value>localhost:8088</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>c:\hdpdata\hadoop\logs</value>
+  </property>
+
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.hostname</name>
+    <value>localhost</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+  </property>
+</configuration>
\ No newline at end of file

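The memory settings above fix the per-node container budget: 5120 MB divided by the 512 MB minimum allocation permits at most 10 minimum-size containers (or 2 at the 2048 MB maximum), and with vmem-check-enabled a container may use virtual memory up to 2.1 times its physical allocation before it is killed. The arithmetic, as a sketch using only values from this file:

# Per-node container math from the yarn-site.xml above.
node_mb    = 5120   # yarn.nodemanager.resource.memory-mb
min_alloc  = 512    # yarn.scheduler.minimum-allocation-mb
max_alloc  = 2048   # yarn.scheduler.maximum-allocation-mb
vmem_ratio = 2.1    # yarn.nodemanager.vmem-pmem-ratio

print("minimum-size containers per node: %d" % (node_mb // min_alloc))  # 10
print("maximum-size containers per node: %d" % (node_mb // max_alloc))  # 2

# With yarn.nodemanager.vmem-check-enabled=true, a 2048 MB container is
# killed once its virtual memory exceeds max_alloc * vmem_ratio.
print("vmem ceiling for a %d MB container: %.0f MB"
      % (max_alloc, max_alloc * vmem_ratio))
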
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
new file mode 100644
index 0000000..a1a4804
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <displayName>ResourceManager</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REFRESHQUEUES</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <displayName>Yarn Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>yarn-env.cmd</fileName>
+              <dictionaryName>yarn-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>capacity-scheduler.xml</fileName>
+              <dictionaryName>capacity-scheduler</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>mapred-site.xml</fileName>
+              <dictionaryName>mapred-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>mapred-env.cmd</fileName>
+              <dictionaryName>mapred-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>
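
Each commandScript above names a Python file (scripts/resourcemanager.py, scripts/nodemanager.py, and so on) that Ambari invokes for lifecycle commands. The sketch below shows the general shape such scripts take with Ambari's resource_management library; it is a hypothetical outline that assumes the Ambari agent environment (where resource_management is importable), not the resourcemanager.py added elsewhere in this commit, and the start command is a placeholder:

from resource_management import Script, Execute

class ResourceManager(Script):
  def install(self, env):
    # Installs the packages declared under <osSpecifics> in metainfo.xml.
    self.install_packages(env)

  def start(self, env):
    # Placeholder command; the real script builds this from stack config.
    Execute("yarn resourcemanager")

  def stop(self, env):
    pass  # stop logic elided

  def status(self, env):
    pass  # liveness check elided

if __name__ == "__main__":
  ResourceManager().execute()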