You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ao...@apache.org on 2013/12/16 15:27:15 UTC
[2/2] git commit: AMBARI-4079. YARN on HDP2. Using resource
management lib (Arsen Babych via aonishuk)
AMBARI-4079. YARN on HDP2. Using resource management lib (Arsen Babych
via aonishuk)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a58a0f80
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a58a0f80
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a58a0f80
Branch: refs/heads/trunk
Commit: a58a0f801a51c7128e3cb2b329277d4aece73faf
Parents: bf536d1
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Mon Dec 16 06:25:39 2013 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Mon Dec 16 06:25:39 2013 -0800
----------------------------------------------------------------------
.../configuration/container-executor.cfg | 20 -
.../MAPREDUCE2/configuration/core-site.xml | 20 -
.../MAPREDUCE2/configuration/global.xml | 44 ---
.../configuration/mapred-queue-acls.xml | 39 --
.../MAPREDUCE2/configuration/mapred-site.xml | 381 ------------------
.../HDP/2.0._/services/MAPREDUCE2/metainfo.xml | 37 --
.../HDP/2.0._/services/MAPREDUCE2/metrics.json | 383 -------------------
.../YARN/configuration/container-executor.cfg | 20 -
.../services/YARN/configuration/global.xml | 24 ++
.../YARN/configuration/mapred-queue-acls.xml | 39 ++
.../services/YARN/configuration/mapred-site.xml | 381 ++++++++++++++++++
.../stacks/HDP/2.0._/services/YARN/metainfo.xml | 158 +++++++-
.../files/validateYarnComponentStatus.py | 165 ++++++++
.../services/YARN/package/scripts/__init__.py | 21 +
.../YARN/package/scripts/historyserver.py | 55 +++
.../package/scripts/mapred_service_check.py | 67 ++++
.../YARN/package/scripts/mapreduce2_client.py | 40 ++
.../YARN/package/scripts/nodemanager.py | 56 +++
.../services/YARN/package/scripts/params.py | 84 ++++
.../YARN/package/scripts/resourcemanager.py | 78 ++++
.../services/YARN/package/scripts/service.py | 65 ++++
.../YARN/package/scripts/service_check.py | 67 ++++
.../2.0._/services/YARN/package/scripts/yarn.py | 128 +++++++
.../YARN/package/scripts/yarn_client.py | 40 ++
.../package/templates/container-executor.cfg.j2 | 22 ++
.../YARN/package/templates/mapreduce.conf.j2 | 17 +
.../YARN/package/templates/yarn-env.sh.j2 | 119 ++++++
.../YARN/package/templates/yarn.conf.j2 | 17 +
28 files changed, 1624 insertions(+), 963 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hfds,bin,0
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml
deleted file mode 100644
index ceedd56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/global.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
- <property>
- <name>hs_host</name>
- <value></value>
- <description>History Server.</description>
- </property>
- <property>
- <name>mapred_log_dir_prefix</name>
- <value>/var/log/hadoop-mapreduce</value>
- <description>Mapreduce Log Dir Prefix</description>
- </property>
- <property>
- <name>mapred_pid_dir_prefix</name>
- <value>/var/run/hadoop-mapreduce</value>
- <description>Mapreduce PID Dir Prefix</description>
- </property>
- <property>
- <name>mapred_user</name>
- <value>mapred</value>
- <description>Mapreduce User</description>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
- <property>
- <name>mapred.queue.default.acl-submit-job</name>
- <value>*</value>
- </property>
-
- <property>
- <name>mapred.queue.default.acl-administer-jobs</name>
- <value>*</value>
- </property>
-
- <!-- END ACLs -->
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-site.xml
deleted file mode 100644
index 424d216..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/configuration/mapred-site.xml
+++ /dev/null
@@ -1,381 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
- <property>
- <name>mapreduce.task.io.sort.mb</name>
- <value>200</value>
- <description>
- The total amount of buffer memory to use while sorting files, in megabytes.
- By default, gives each merge stream 1MB, which should minimize seeks.
- </description>
- </property>
-
- <property>
- <name>mapreduce.map.sort.spill.percent</name>
- <value>0.7</value>
- <description>
- The soft limit in the serialization buffer. Once reached, a thread will
- begin to spill the contents to disk in the background. Note that
- collection will not block if this threshold is exceeded while a spill
- is already in progress, so spills may be larger than this threshold when
- it is set to less than .5
- </description>
- </property>
-
- <property>
- <name>mapreduce.task.io.sort.factor</name>
- <value>100</value>
- <description>
- The number of streams to merge at once while sorting files.
- This determines the number of open file handles.
- </description>
- </property>
-
-<!-- map/reduce properties -->
- <property>
- <name>mapreduce.cluster.administrators</name>
- <value> hadoop</value>
- <description>
- Administrators for MapReduce applications.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.shuffle.parallelcopies</name>
- <value>30</value>
- <description>
- The default number of parallel transfers run by reduce during
- the copy(shuffle) phase.
- </description>
- </property>
-
- <property>
- <name>mapreduce.map.speculative</name>
- <value>false</value>
- <description>
- If true, then multiple instances of some map tasks
- may be executed in parallel.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.speculative</name>
- <value>false</value>
- <description>
- If true, then multiple instances of some reduce tasks may be
- executed in parallel.
- </description>
- </property>
-
- <property>
- <name>mapreduce.job.reduce.slowstart.completedmaps</name>
- <value>0.05</value>
- <description>
- Fraction of the number of maps in the job which should be complete before
- reduces are scheduled for the job.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.shuffle.merge.percent</name>
- <value>0.66</value>
- <description>
- The usage threshold at which an in-memory merge will be
- initiated, expressed as a percentage of the total memory allocated to
- storing in-memory map outputs, as defined by
- mapreduce.reduce.shuffle.input.buffer.percent.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
- <value>0.7</value>
- <description>
- The percentage of memory to be allocated from the maximum heap
- size to storing map outputs during the shuffle.
- </description>
- </property>
-
- <property>
- <name>mapreduce.map.output.compress.codec</name>
- <value></value>
- <description>If the map outputs are compressed, how should they be
- compressed
- </description>
- </property>
-
- <property>
- <name>mapreduce.output.fileoutputformat.compress.type</name>
- <value>BLOCK</value>
- <description>
- If the job outputs are to compressed as SequenceFiles, how should
- they be compressed? Should be one of NONE, RECORD or BLOCK.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.input.buffer.percent</name>
- <value>0.0</value>
- <description>
- The percentage of memory- relative to the maximum heap size- to
- retain map outputs during the reduce. When the shuffle is concluded, any
- remaining map outputs in memory must consume less than this threshold before
- the reduce can begin.
- </description>
- </property>
-
- <!-- copied from kryptonite configuration -->
- <property>
- <name>mapreduce.map.output.compress</name>
- <value>false</value>
- </property>
-
- <property>
- <name>mapreduce.task.timeout</name>
- <value>300000</value>
- <description>
- The number of milliseconds before a task will be
- terminated if it neither reads an input, writes an output, nor
- updates its status string.
- </description>
- </property>
-
- <property>
- <name>mapreduce.map.memory.mb</name>
- <value>1024</value>
- <description>Virtual memory for single Map task</description>
- </property>
-
- <property>
- <name>mapreduce.reduce.memory.mb</name>
- <value>1024</value>
- <description>Virtual memory for single Reduce task</description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.keytab.file</name>
- <!-- cluster variant -->
- <value></value>
- <description>The keytab for the job history server principal.</description>
- </property>
-
- <property>
- <name>mapreduce.shuffle.port</name>
- <value>13562</value>
- <description>
- Default port that the ShuffleHandler will run on.
- ShuffleHandler is a service run at the NodeManager to facilitate
- transfers of intermediate Map outputs to requesting Reducers.
- </description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.intermediate-done-dir</name>
- <value>/mr-history/tmp</value>
- <description>
- Directory where history files are written by MapReduce jobs.
- </description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.done-dir</name>
- <value>/mr-history/done</value>
- <description>
- Directory where history files are managed by the MR JobHistory Server.
- </description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.address</name>
- <value>localhost:10020</value>
- <description>Enter your JobHistoryServer hostname.</description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.webapp.address</name>
- <value>localhost:19888</value>
- <description>Enter your JobHistoryServer hostname.</description>
- </property>
-
- <property>
- <name>mapreduce.framework.name</name>
- <value>yarn</value>
- <description>
- The runtime framework for executing MapReduce jobs. Can be one of local,
- classic or yarn.
- </description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.staging-dir</name>
- <value>/user</value>
- <description>
- The staging dir used while submitting jobs.
- </description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.resource.mb</name>
- <value>512</value>
- <description>The amount of memory the MR AppMaster needs.</description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.command-opts</name>
- <value>-Xmx312m</value>
- <description>
- Java opts for the MR App Master processes.
- The following symbol, if present, will be interpolated: @taskid@ is replaced
- by current TaskID. Any other occurrences of '@' will go unchanged.
- For example, to enable verbose gc logging to a file named for the taskid in
- /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
- -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
- Usage of -Djava.library.path can cause programs to no longer function if
- hadoop native libraries are used. These values should instead be set as part
- of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
- mapreduce.reduce.env config settings.
- </description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.admin-command-opts</name>
- <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
- <description>
- Java opts for the MR App Master processes for admin purposes.
- It will appears before the opts set by yarn.app.mapreduce.am.command-opts and
- thus its options can be overridden user.
-
- Usage of -Djava.library.path can cause programs to no longer function if
- hadoop native libraries are used. These values should instead be set as part
- of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
- mapreduce.reduce.env config settings.
- </description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.log.level</name>
- <value>INFO</value>
- <description>MR App Master process log level.</description>
- </property>
-
- <property>
- <name>yarn.app.mapreduce.am.env</name>
- <value></value>
- <description>
- User added environment variables for the MR App Master
- processes. Example :
- 1) A=foo This will set the env variable A to foo
- 2) B=$B:c This is inherit tasktracker's B env variable.
- </description>
- </property>
-
- <property>
- <name>mapreduce.admin.map.child.java.opts</name>
- <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
- </property>
-
- <property>
- <name>mapreduce.admin.reduce.child.java.opts</name>
- <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
- </property>
-
- <property>
- <name>mapreduce.application.classpath</name>
- <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
- <description>
- CLASSPATH for MR applications. A comma-separated list of CLASSPATH
- entries.
- </description>
- </property>
-
- <property>
- <name>mapreduce.am.max-attempts</name>
- <value>2</value>
- <description>
- The maximum number of application attempts. It is a
- application-specific setting. It should not be larger than the global number
- set by resourcemanager. Otherwise, it will be override. The default number is
- set to 2, to allow at least one retry for AM.
- </description>
- </property>
-
-
-
- <property>
- <name>mapreduce.map.java.opts</name>
- <value>-Xmx756m</value>
- <description>
- Larger heap-size for child jvms of maps.
- </description>
- </property>
-
-
- <property>
- <name>mapreduce.reduce.java.opts</name>
- <value>-Xmx756m</value>
- <description>
- Larger heap-size for child jvms of reduces.
- </description>
- </property>
-
- <property>
- <name>mapreduce.map.log.level</name>
- <value>INFO</value>
- <description>
- The logging level for the map task. The allowed levels are:
- OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.log.level</name>
- <value>INFO</value>
- <description>
- The logging level for the reduce task. The allowed levels are:
- OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
- </description>
- </property>
-
- <property>
- <name>mapreduce.admin.user.env</name>
- <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
- <description>
- Additional execution environment entries for map and reduce task processes.
- This is not an additive property. You must preserve the original value if
- you want your map and reduce tasks to have access to native libraries (compression, etc)
- </description>
- </property>
-
- <property>
- <name>mapreduce.output.fileoutputformat.compress</name>
- <value>false</value>
- <description>
- Should the job outputs be compressed?
- </description>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml
deleted file mode 100644
index 3790da2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metainfo.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<metainfo>
- <user>mapred</user>
- <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
- <version>2.1.0.2.0.6.0</version>
- <components>
- <component>
- <name>HISTORYSERVER</name>
- <category>MASTER</category>
- </component>
- <component>
- <name>MAPREDUCE2_CLIENT</name>
- <category>CLIENT</category>
- </component>
- </components>
- <configuration-dependencies>
- <config-type>core-site</config-type>
- <config-type>global</config-type>
- <config-type>mapred-site</config-type>
- </configuration-dependencies>
-</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json
deleted file mode 100644
index 97317b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/MAPREDUCE2/metrics.json
+++ /dev/null
@@ -1,383 +0,0 @@
-{
- "HISTORYSERVER": {
- "Component": [
- {
- "type": "ganglia",
- "metrics": {
- "metrics/memory/mem_total": {
- "metric": "mem_total",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/memHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemHeapCommittedM",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/threadsRunnable": {
- "metric": "jvm.JvmMetrics.ThreadsRunnable",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/threadsNew": {
- "metric": "jvm.JvmMetrics.ThreadsNew",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/rpcAuthorizationFailures": {
- "metric": "rpc.metrics.RpcAuthorizationFailures",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/ugi/loginSuccess_avg_time": {
- "metric": "ugi.ugi.LoginSuccessAvgTime",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/RpcQueueTime_avg_time": {
- "metric": "rpc.rpc.RpcQueueTimeAvgTime",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/SentBytes": {
- "metric": "rpc.rpc.SentBytes",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/memNonHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/logWarn": {
- "metric": "jvm.JvmMetrics.LogWarn",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/threadsTimedWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/process/proc_run": {
- "metric": "proc_run",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/gcCount": {
- "metric": "jvm.JvmMetrics.GcCount",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/ReceivedBytes": {
- "metric": "rpc.rpc.ReceivedBytes",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/memory/swap_total": {
- "metric": "swap_total",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_nice": {
- "metric": "cpu_nice",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/threadsBlocked": {
- "metric": "jvm.JvmMetrics.ThreadsBlocked",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/RpcQueueTime_num_ops": {
- "metric": "rpc.rpc.RpcQueueTimeNumOps",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/process/proc_total": {
- "metric": "proc_total",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/disk/part_max_used": {
- "metric": "part_max_used",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/rpc/NumOpenConnections": {
- "metric": "rpc.rpc.NumOpenConnections",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/memHeapUsedM": {
- "metric": "jvm.JvmMetrics.MemHeapUsedM",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/threadsWaiting": {
- "metric": "jvm.JvmMetrics.ThreadsWaiting",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/disk/disk_free": {
- "metric": "disk_free",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/memory/mem_buffers": {
- "metric": "mem_buffers",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/ugi/loginSuccess_num_ops": {
- "metric": "ugi.ugi.LoginSuccessNumOps",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/gcTimeMillis": {
- "metric": "jvm.JvmMetrics.GcTimeMillis",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/cpu/cpu_idle": {
- "metric": "cpu_idle",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/threadsTerminated": {
- "metric": "jvm.JvmMetrics.ThreadsTerminated",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/network/bytes_out": {
- "metric": "bytes_out",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_aidle": {
- "metric": "cpu_aidle",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/memory/mem_free": {
- "metric": "mem_free",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_user": {
- "metric": "cpu_user",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/memory/swap_free": {
- "metric": "swap_free",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_system": {
- "metric": "cpu_system",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/network/bytes_in": {
- "metric": "bytes_in",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/boottime": {
- "metric": "boottime",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/network/pkts_out": {
- "metric": "pkts_out",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/memNonHeapCommittedM": {
- "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/callQueueLen": {
- "metric": "rpc.rpc.CallQueueLength",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/memory/mem_cached": {
- "metric": "mem_cached",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/disk/disk_total": {
- "metric": "disk_total",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/logInfo": {
- "metric": "jvm.JvmMetrics.LogInfo",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/ugi/loginFailure_num_ops": {
- "metric": "ugi.ugi.LoginFailureNumOps",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/RpcProcessingTime_num_ops": {
- "metric": "rpc.rpc.RpcProcessingTimeNumOps",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/memory/mem_shared": {
- "metric": "mem_shared",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_wio": {
- "metric": "cpu_wio",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/jvm/logError": {
- "metric": "jvm.JvmMetrics.LogError",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/ugi/loginFailure_avg_time": {
- "metric": "ugi.ugi.LoginFailureAvgTime",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/cpu/cpu_num": {
- "metric": "cpu_num",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/cpu/cpu_speed": {
- "metric": "cpu_speed",
- "pointInTime": true,
- "temporal": true
- },
- "metrics/rpc/rpcAuthorizationSuccesses": {
- "metric": "rpc.rpc.RpcAuthorizationSuccesses",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/jvm/logFatal": {
- "metric": "jvm.JvmMetrics.LogFatal",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/RpcProcessingTime_avg_time": {
- "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/rpcAuthenticationSuccesses": {
- "metric": "rpc.metrics.RpcAuthenticationSuccesses",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/rpc/rpcAuthenticationFailures": {
- "metric": "rpc.metrics.RpcAuthenticationFailures",
- "pointInTime": false,
- "temporal": true
- },
- "metrics/network/pkts_in": {
- "metric": "pkts_in",
- "pointInTime": true,
- "temporal": true
- }
- }
- }
- ],
- "HostComponent": [
- {
- "type": "jmx",
- "metrics": {
- "metrics/jvm/memNonHeapUsedM": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/logWarn": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/memHeapCommittedM": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsTimedWaiting": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/gcCount": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsRunnable": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsBlocked": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsNew": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/memNonHeapCommittedM": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/logError": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/memHeapUsedM": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/logFatal": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsWaiting": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/gcTimeMillis": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/logInfo": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
- "pointInTime": true,
- "temporal": false
- },
- "metrics/jvm/threadsTerminated": {
- "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
- "pointInTime": true,
- "temporal": false
- }
- }
- }
- ]
- }
-}
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hfds,bin,0
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml
index edd1636..429c39f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/global.xml
@@ -61,4 +61,28 @@
<value>1024</value>
<description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
</property>
+
+ <!--MAPREDUCE2-->
+
+ <property>
+ <name>hs_host</name>
+ <value></value>
+ <description>History Server.</description>
+ </property>
+ <property>
+ <name>mapred_log_dir_prefix</name>
+ <value>/var/log/hadoop-mapreduce</value>
+ <description>Mapreduce Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>mapred_pid_dir_prefix</name>
+ <value>/var/run/hadoop-mapreduce</value>
+ <description>Mapreduce PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>mapred_user</name>
+ <value>mapred</value>
+ <description>Mapreduce User</description>
+ </property>
+
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml
new file mode 100644
index 0000000..ce12380
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+ <property>
+ <name>mapred.queue.default.acl-submit-job</name>
+ <value>*</value>
+ </property>
+
+ <property>
+ <name>mapred.queue.default.acl-administer-jobs</name>
+ <value>*</value>
+ </property>
+
+ <!-- END ACLs -->
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml
new file mode 100644
index 0000000..424d216
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/configuration/mapred-site.xml
@@ -0,0 +1,381 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+ <property>
+ <name>mapreduce.task.io.sort.mb</name>
+ <value>200</value>
+ <description>
+ The total amount of buffer memory to use while sorting files, in megabytes.
+ By default, gives each merge stream 1MB, which should minimize seeks.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.sort.spill.percent</name>
+ <value>0.7</value>
+ <description>
+ The soft limit in the serialization buffer. Once reached, a thread will
+ begin to spill the contents to disk in the background. Note that
+ collection will not block if this threshold is exceeded while a spill
+ is already in progress, so spills may be larger than this threshold when
+ it is set to less than .5
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.task.io.sort.factor</name>
+ <value>100</value>
+ <description>
+ The number of streams to merge at once while sorting files.
+ This determines the number of open file handles.
+ </description>
+ </property>
+
+<!-- map/reduce properties -->
+ <property>
+ <name>mapreduce.cluster.administrators</name>
+ <value> hadoop</value>
+ <description>
+ Administrators for MapReduce applications.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.shuffle.parallelcopies</name>
+ <value>30</value>
+ <description>
+ The default number of parallel transfers run by reduce during
+ the copy(shuffle) phase.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.speculative</name>
+ <value>false</value>
+ <description>
+ If true, then multiple instances of some map tasks
+ may be executed in parallel.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.speculative</name>
+ <value>false</value>
+ <description>
+ If true, then multiple instances of some reduce tasks may be
+ executed in parallel.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+ <value>0.05</value>
+ <description>
+ Fraction of the number of maps in the job which should be complete before
+ reduces are scheduled for the job.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.shuffle.merge.percent</name>
+ <value>0.66</value>
+ <description>
+ The usage threshold at which an in-memory merge will be
+ initiated, expressed as a percentage of the total memory allocated to
+ storing in-memory map outputs, as defined by
+ mapreduce.reduce.shuffle.input.buffer.percent.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+ <value>0.7</value>
+ <description>
+ The percentage of memory to be allocated from the maximum heap
+ size to storing map outputs during the shuffle.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.output.compress.codec</name>
+ <value></value>
+ <description>If the map outputs are compressed, how should they be
+ compressed
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.output.fileoutputformat.compress.type</name>
+ <value>BLOCK</value>
+ <description>
+ If the job outputs are to be compressed as SequenceFiles, how should
+ they be compressed? Should be one of NONE, RECORD or BLOCK.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.input.buffer.percent</name>
+ <value>0.0</value>
+ <description>
+ The percentage of memory- relative to the maximum heap size- to
+ retain map outputs during the reduce. When the shuffle is concluded, any
+ remaining map outputs in memory must consume less than this threshold before
+ the reduce can begin.
+ </description>
+ </property>
+
+ <!-- copied from kryptonite configuration -->
+ <property>
+ <name>mapreduce.map.output.compress</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>mapreduce.task.timeout</name>
+ <value>300000</value>
+ <description>
+ The number of milliseconds before a task will be
+ terminated if it neither reads an input, writes an output, nor
+ updates its status string.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.memory.mb</name>
+ <value>1024</value>
+ <description>Virtual memory for single Map task</description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.memory.mb</name>
+ <value>1024</value>
+ <description>Virtual memory for single Reduce task</description>
+ </property>
+
+ <property>
+ <name>mapreduce.jobhistory.keytab.file</name>
+ <!-- cluster variant -->
+ <value></value>
+ <description>The keytab for the job history server principal.</description>
+ </property>
+
+ <property>
+ <name>mapreduce.shuffle.port</name>
+ <value>13562</value>
+ <description>
+ Default port that the ShuffleHandler will run on.
+ ShuffleHandler is a service run at the NodeManager to facilitate
+ transfers of intermediate Map outputs to requesting Reducers.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.jobhistory.intermediate-done-dir</name>
+ <value>/mr-history/tmp</value>
+ <description>
+ Directory where history files are written by MapReduce jobs.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.jobhistory.done-dir</name>
+ <value>/mr-history/done</value>
+ <description>
+ Directory where history files are managed by the MR JobHistory Server.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.jobhistory.address</name>
+ <value>localhost:10020</value>
+ <description>Enter your JobHistoryServer hostname.</description>
+ </property>
+
+ <property>
+ <name>mapreduce.jobhistory.webapp.address</name>
+ <value>localhost:19888</value>
+ <description>Enter your JobHistoryServer hostname.</description>
+ </property>
+
+ <property>
+ <name>mapreduce.framework.name</name>
+ <value>yarn</value>
+ <description>
+ The runtime framework for executing MapReduce jobs. Can be one of local,
+ classic or yarn.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.staging-dir</name>
+ <value>/user</value>
+ <description>
+ The staging dir used while submitting jobs.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.resource.mb</name>
+ <value>512</value>
+ <description>The amount of memory the MR AppMaster needs.</description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.command-opts</name>
+ <value>-Xmx312m</value>
+ <description>
+ Java opts for the MR App Master processes.
+ The following symbol, if present, will be interpolated: @taskid@ is replaced
+ by current TaskID. Any other occurrences of '@' will go unchanged.
+ For example, to enable verbose gc logging to a file named for the taskid in
+ /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+ -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+ Usage of -Djava.library.path can cause programs to no longer function if
+ hadoop native libraries are used. These values should instead be set as part
+ of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+ mapreduce.reduce.env config settings.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.admin-command-opts</name>
+ <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+ <description>
+ Java opts for the MR App Master processes for admin purposes.
+ It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+ thus its options can be overridden by the user.
+
+ Usage of -Djava.library.path can cause programs to no longer function if
+ hadoop native libraries are used. These values should instead be set as part
+ of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+ mapreduce.reduce.env config settings.
+ </description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.log.level</name>
+ <value>INFO</value>
+ <description>MR App Master process log level.</description>
+ </property>
+
+ <property>
+ <name>yarn.app.mapreduce.am.env</name>
+ <value></value>
+ <description>
+ User added environment variables for the MR App Master
+ processes. Example :
+ 1) A=foo This will set the env variable A to foo
+ 2) B=$B:c This will inherit the tasktracker's B env variable.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.admin.map.child.java.opts</name>
+ <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+ </property>
+
+ <property>
+ <name>mapreduce.admin.reduce.child.java.opts</name>
+ <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+ </property>
+
+ <property>
+ <name>mapreduce.application.classpath</name>
+ <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+ <description>
+ CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+ entries.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.am.max-attempts</name>
+ <value>2</value>
+ <description>
+ The maximum number of application attempts. It is a
+ application-specific setting. It should not be larger than the global number
+ set by resourcemanager. Otherwise, it will be override. The default number is
+ set to 2, to allow at least one retry for AM.
+ </description>
+ </property>
+
+
+
+ <property>
+ <name>mapreduce.map.java.opts</name>
+ <value>-Xmx756m</value>
+ <description>
+ Larger heap-size for child jvms of maps.
+ </description>
+ </property>
+
+
+ <property>
+ <name>mapreduce.reduce.java.opts</name>
+ <value>-Xmx756m</value>
+ <description>
+ Larger heap-size for child jvms of reduces.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.map.log.level</name>
+ <value>INFO</value>
+ <description>
+ The logging level for the map task. The allowed levels are:
+ OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.reduce.log.level</name>
+ <value>INFO</value>
+ <description>
+ The logging level for the reduce task. The allowed levels are:
+ OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.admin.user.env</name>
+ <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+ <description>
+ Additional execution environment entries for map and reduce task processes.
+ This is not an additive property. You must preserve the original value if
+ you want your map and reduce tasks to have access to native libraries (compression, etc)
+ </description>
+ </property>
+
+ <property>
+ <name>mapreduce.output.fileoutputformat.compress</name>
+ <value>false</value>
+ <description>
+ Should the job outputs be compressed?
+ </description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml
index 8187329..127d055 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/metainfo.xml
@@ -15,28 +15,148 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
+
<metainfo>
- <user>mapred</user>
- <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
- <version>2.1.0.2.0.6.0</version>
- <components>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>YARN</name>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.1.0.2.0.6.0</version>
+ <components>
+
+ <component>
+ <name>RESOURCEMANAGER</name>
+ <category>MASTER</category>
+ <commandScript>
+ <script>scripts/resourcemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
<component>
- <name>RESOURCEMANAGER</name>
- <category>MASTER</category>
+ <name>NODEMANAGER</name>
+ <category>SLAVE</category>
+ <commandScript>
+ <script>scripts/nodemanager.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
</component>
+
+ <component>
+ <name>YARN_CLIENT</name>
+ <category>CLIENT</category>
+ <commandScript>
+ <script>scripts/yarn_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+ </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osType>any</osType>
+ <packages>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-yarn</name>
+ </package>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-yarn-nodemanager</name>
+ </package>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-mapreduce</name>
+ </package>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-yarn-proxyserver</name>
+ </package>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-yarn-resourcemanager</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <configuration-dependencies>
+ <config-type>yarn-site</config-type>
+ <config-type>capacity-scheduler</config-type>
+ <config-type>core-site</config-type>
+ <config-type>global</config-type>
+ <config-type>mapred-site</config-type>
+ <config-type>mapred-queue-acls</config-type>
+ </configuration-dependencies>
+ </service>
+
+ <service>
+ <name>MAPREDUCE2</name>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.1.0.2.0.6.0</version>
+ <components>
<component>
- <name>NODEMANAGER</name>
- <category>SLAVE</category>
+ <name>HISTORYSERVER</name>
+ <category>MASTER</category>
+ <commandScript>
+ <script>scripts/historyserver.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
</component>
- <component>
- <name>YARN_CLIENT</name>
- <category>CLIENT</category>
+
+ <component>
+ <name>MAPREDUCE2_CLIENT</name>
+ <category>CLIENT</category>
+ <commandScript>
+ <script>scripts/mapreduce2_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
</component>
- </components>
- <configuration-dependencies>
- <config-type>global</config-type>
- <config-type>core-site</config-type>
- <config-type>yarn-site</config-type>
- <config-type>capacity-scheduler</config-type>
- </configuration-dependencies>
-</metainfo>
+ </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osType>any</osType>
+ <packages>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-mapreduce</name>
+ </package>
+ <package>
+ <type>rpm</type>
+ <name>hadoop-mapreduce-historyserver</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <commandScript>
+ <script>scripts/mapred_service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <configuration-dependencies>
+ <config-type>yarn-site</config-type>
+ <config-type>capacity-scheduler</config-type>
+ <config-type>core-site</config-type>
+ <config-type>global</config-type>
+ <config-type>mapred-site</config-type>
+ <config-type>mapred-queue-acls</config-type>
+ </configuration-dependencies>
+ </service>
+
+ </services>
+</metainfo>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 0000000..dac198a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+#Return reponse for given path and address
+def getResponse(path, address, ssl_enabled):
+
+ command = "curl"
+ httpGssnegotiate = "--negotiate"
+ userpswd = "-u:"
+ insecure = "-k"# This is smoke test, no need to check CA of server
+ if ssl_enabled:
+ url = 'https://' + address + path
+ else:
+ url = 'http://' + address + path
+
+ command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+ try:
+ proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (stdout, stderr) = proc.communicate()
+ response = json.loads(stdout)
+ if response is None:
+ print 'There is no response for url: ' + str(url)
+ exit(1)
+ return response
+ except Exception as e:
+ print 'Error getting response for url:' + str(url), e
+ exit(1)
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+ try:
+ response = getResponse(path, address, ssl_enabled)
+ is_valid = validateAvailabilityResponse(component, response)
+ if not is_valid:
+ exit(1)
+ except Exception as e:
+ print 'Error checking availability status of component', e
+ exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+ try:
+ if component == RESOURCEMANAGER:
+ rm_state = response['clusterInfo']['state']
+ if rm_state == STARTED_STATE:
+ return True
+ else:
+ print 'Resourcemanager is not started'
+ return False
+
+ elif component == NODEMANAGER:
+ node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+ if node_healthy:
+ return True
+ else:
+ return False
+ elif component == HISTORYSERVER:
+ hs_start_time = response['historyInfo']['startedOn']
+ if hs_start_time > 0:
+ return True
+ else:
+ return False
+ else:
+ return False
+ except Exception as e:
+ print 'Error validation of availability response for ' + str(component), e
+ return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, address, ssl_enabled):
+
+ try:
+ response = getResponse(path, address, ssl_enabled)
+ is_valid = validateAbilityResponse(component, response)
+ if not is_valid:
+ exit(1)
+ except Exception as e:
+ print 'Error checking ability of component', e
+ exit(1)
+
+#Validate component-specific response that it has required resources to work
+def validateAbilityResponse(component, response):
+ try:
+ if component == RESOURCEMANAGER:
+ nodes = []
+ if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
+ nodes = response['nodes']['node']
+ connected_nodes_count = len(nodes)
+ if connected_nodes_count == 0:
+ print 'There is no connected nodemanagers to resourcemanager'
+ return False
+ active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+ active_nodes_count = len(active_nodes)
+
+ if active_nodes_count == 0:
+ print 'There is no connected active nodemanagers to resourcemanager'
+ return False
+ else:
+ return True
+ else:
+ return False
+ except Exception as e:
+ print 'Error validation of ability response', e
+ return False
+
+#
+# Main.
+#
+def main():
+ parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+ parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+ parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+ (options, args) = parser.parse_args()
+
+ component = args[0]
+
+ address = options.address
+ ssl_enabled = options.ssl_enabled == 'true'
+ if component == RESOURCEMANAGER:
+ path = '/ws/v1/cluster/info'
+ elif component == NODEMANAGER:
+ path = '/ws/v1/node/info'
+ elif component == HISTORYSERVER:
+ path = '/ws/v1/history/info'
+ else:
+ parser.error("Invalid component")
+
+ validateAvailability(component, path, address, ssl_enabled)
+
+ if component == RESOURCEMANAGER:
+ path = '/ws/v1/cluster/nodes'
+ validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+ main()
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py
new file mode 100644
index 0000000..9f757a0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Historyserver(Script):
+ def install(self, env):
+ self.install_packages(env)
+ self.configure(env)
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ yarn()
+
+ def start(self, env):
+ import params
+ env.set_params(params)
+ self.configure(env) # FOR SECURITY
+ service('historyserver',
+ action='start'
+ )
+
+ def stop(self, env):
+ import params
+ env.set_params(params)
+
+ service('historyserver',
+ action='stop'
+ )
+
+if __name__ == "__main__":
+ Historyserver().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000..3d5a9da
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
class MapReduce2ServiceCheck(Script):
  """Smoke test for MapReduce2: stages an input file in HDFS, runs the
  example wordcount job as the smoke user, and verifies that the output
  directory was created."""

  def service_check(self, env):
    import params
    env.set_params(params)

    # These local names are substituted into the format() templates below,
    # so they must keep these exact identifiers.
    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
    input_file = format("/user/{smokeuser}/mapredsmokeinput")
    output_file = format("/user/{smokeuser}/mapredsmokeoutput")

    # All hadoop commands run as the smoke user against the cluster configs.
    run_as_smoke = dict(user=params.smokeuser,
                        conf_dir=params.hadoop_conf_dir)

    # Remove leftovers from a previous run, then stage the input file.
    ExecuteHadoop(format("fs -rm -r -f {output_file} {input_file}"),
                  tries=1,
                  try_sleep=5,
                  **run_as_smoke
                  )
    ExecuteHadoop(format("fs -put /etc/passwd {input_file}"),
                  tries=1,
                  try_sleep=5,
                  **run_as_smoke
                  )

    # Run the example wordcount job; surface its output in the command log.
    ExecuteHadoop(format("jar {jar_path} wordcount {input_file} {output_file}"),
                  tries=1,
                  try_sleep=5,
                  logoutput=True,
                  **run_as_smoke
                  )

    # The check succeeds iff the job produced the output path.
    ExecuteHadoop(format("fs -test -e {output_file}"),
                  **run_as_smoke
                  )


if __name__ == "__main__":
  MapReduce2ServiceCheck().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000..2583dbc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
class MapReduce2Client(Script):
  """Client-only MapReduce2 component: installs packages and writes the
  YARN/MapReduce2 configuration files; no daemon is managed."""

  def configure(self, env):
    import params
    env.set_params(params)
    # Renders all YARN/MapReduce2 configuration files and directories.
    yarn()

  def install(self, env):
    # Lay down packages, then immediately generate configuration so the
    # client is usable right after installation.
    self.install_packages(env)
    self.configure(env)


if __name__ == "__main__":
  MapReduce2Client().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py
new file mode 100644
index 0000000..a2a6308
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
class Nodemanager(Script):
  """Lifecycle commands (install/configure/start/stop) for the YARN
  NodeManager daemon."""

  def install(self, env):
    # Install packages first, then render configuration files.
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)
    # Renders all YARN configuration files and directories.
    yarn()

  def start(self, env):
    import params
    env.set_params(params)
    # Re-render configs on every start so security-related changes
    # take effect before the daemon comes up.
    self.configure(env)  # FOR SECURITY
    service('nodemanager', action='start')

  def stop(self, env):
    import params
    env.set_params(params)
    service('nodemanager', action='stop')


if __name__ == "__main__":
  Nodemanager().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py
new file mode 100644
index 0000000..4b09d8c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/params.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
from resource_management import *

# Module of shared configuration values for the YARN/MapReduce2 scripts.
# Every name defined here is part of the public surface (consumed via
# ``import params`` and by format() templates) -- do not rename.

# Command configuration pushed by the Ambari server.
config = Script.get_config()

# Hadoop client configuration directory.
config_dir = "/etc/hadoop/conf"

# Service users.
mapred_user = config['configurations']['global']['mapred_user']
yarn_user = config['configurations']['global']['yarn_user']
hdfs_user = config['configurations']['global']['hdfs_user']

# Smoke-test / security settings.
smokeuser = config['configurations']['global']['smokeuser']
security_enabled = config['configurations']['global']['security_enabled']
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

# ResourceManager endpoint: host from cluster topology, port parsed off the
# configured webapp address.
rm_host = config['clusterHostInfo']['rm_host'][0]
rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
# NOTE(review): HTTPS port is hard-coded rather than read from yarn-site
# (yarn.resourcemanager.webapp.https.address) -- confirm this is intended.
rm_https_port = "8090"

java64_home = config['configurations']['global']['java64_home']
hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)

# Installation layout and JVM heap sizes.
hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
hadoop_yarn_home = '/usr/lib/hadoop-yarn'
yarn_heapsize = config['configurations']['global']['yarn_heapsize']
resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']

# Log / pid directory prefixes; the per-user directories are derived below.
yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']

# Web UI addresses used by status / alert checks.
rm_webui_address = format("{rm_host}:{rm_port}")
rm_webui_https_address = format("{rm_host}:{rm_https_port}")
nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']

# NodeManager local working and log directories (comma-separated lists).
nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']


# Jar locations used by the smoke tests (version wildcards resolved by the
# shell at execution time).
hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"

# Per-user pid directories.
yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")

# Per-user log directories and job-summary log files.
mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")

# Locations of the daemon control scripts (mr-jobhistory-daemon.sh,
# yarn-daemon.sh) used by service.py.
mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
yarn_bin = "/usr/lib/hadoop-yarn/sbin"

user_group = config['configurations']['global']['user_group']
limits_conf_dir = "/etc/security/limits.d"
hadoop_conf_dir = "/etc/hadoop/conf"
yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py
new file mode 100644
index 0000000..aef6571
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
import sys
from resource_management import *

from yarn import yarn
from service import service


class Resourcemanager(Script):
  """Lifecycle commands (install/configure/start/stop) for the YARN
  ResourceManager daemon.

  Fix: the file originally contained the shebang and Apache license header
  twice in a row; the duplicate block is removed.
  """

  def install(self, env):
    # Install packages first, then render configuration files.
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)
    # Renders all YARN configuration files and directories.
    yarn()

  def start(self, env):
    import params
    env.set_params(params)
    # Re-render configs on every start so security-related changes
    # take effect before the daemon comes up.
    self.configure(env)  # FOR SECURITY
    service('resourcemanager',
            action='start'
    )

  def stop(self, env):
    import params
    env.set_params(params)

    service('resourcemanager',
            action='stop'
    )


if __name__ == "__main__":
  Resourcemanager().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/a58a0f80/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py
new file mode 100644
index 0000000..714f971
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0._/services/YARN/package/scripts/service.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
def service(
    name,
    action='start'):
  """Start or stop a YARN/MapReduce2 daemon via its Hadoop *-daemon.sh script.

  name   -- daemon name; 'historyserver' runs under the mapred user through
            mr-jobhistory-daemon.sh, any other name (resourcemanager,
            nodemanager) runs under the yarn user through yarn-daemon.sh.
  action -- 'start' or 'stop'; any other value performs nothing.
  """

  import params

  # NOTE: the locals below (daemon, pid_file, usr, cmd) are referenced by
  # name from the format() templates, which resolve placeholders from the
  # caller's local scope as well as params -- do not rename them.
  if (name == 'historyserver'):
    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
    usr = params.mapred_user
  else:
    daemon = format("{yarn_bin}/yarn-daemon.sh")
    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
    usr = params.yarn_user

  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")

  if action == 'start':
    # 'su - <user>' gives the daemon the service user's full login
    # environment before invoking the control script.
    daemon_cmd = format("su - {usr} -c '{cmd} start {name}'")
    # Skip the start when the pid file exists and its process is alive.
    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
    Execute(daemon_cmd,
            #user=usr, #Fix execution from user
            not_if=no_op
    )

    # Post-start liveness check: after a 5s wait, re-run the pid check as a
    # command; it is skipped (not_if) when the daemon is up, and fails the
    # command -- presumably intentionally -- when the daemon died.
    Execute(no_op,
            user=usr,
            not_if=no_op,
            initial_wait=5
    )

  elif action == 'stop':
    daemon_cmd = format("{cmd} stop {name}")
    Execute(daemon_cmd,
            user=usr,
    )
    # Remove the stale pid file so a later start is not skipped by its guard.
    rm_pid = format("rm -f {pid_file}")
    Execute(rm_pid,
            user=usr
    )
\ No newline at end of file