Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:51:01 UTC
[36/51] [partial] AMBARI-4491. Move all the supported versions in
Baikal for stack to python code (remove dependence on puppet). (aonishuk)
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index bb54a93..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,623 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
- <!-- i/o properties -->
-
- <property>
- <name>io.sort.mb</name>
- <value>200</value>
- <description>
- The total amount of Map-side buffer memory to use while sorting files
- </description>
- </property>
-
- <property>
- <name>io.sort.record.percent</name>
- <value>.2</value>
- <description>The percentage of io.sort.mb dedicated to tracking record boundaries. Let this value be r, io.sort.mb be x.
- The maximum number of records collected before the collection thread must block is equal to (r * x) / 4
- </description>
- </property>
-
- <property>
- <name>io.sort.spill.percent</name>
- <value>0.9</value>
- <description>Percentage of sort buffer used for record collection</description>
- </property>
-
- <property>
- <name>io.sort.factor</name>
- <value>100</value>
- <description>The number of streams to merge at once while sorting files. This determines the number of open file handles.
- </description>
- </property>
-
- <!-- map/reduce properties -->
-
- <property>
- <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
- <value>250</value>
- <description>Normally, this is the amount of time before killing
- processes, and the recommended default is 5 seconds (a value of
- 5000). In this case, we are using it solely to blast tasks before
- killing them, and killing them very quickly (1/4 second) to guarantee
- that we do not leave VMs around for later jobs.
- </description>
- </property>
-
- <property>
- <name>mapred.job.tracker.handler.count</name>
- <value>50</value>
- <description>
- The number of server threads for the JobTracker. This should be roughly
- 4% of the number of tasktracker nodes.
- </description>
- </property>
-
- <property>
- <name>mapred.system.dir</name>
- <value>/mapred/system</value>
- <description>Path on HDFS where the MapReduce framework stores system files</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapred.job.tracker</name>
- <!-- cluster variant -->
- <value>localhost:50300</value>
- <description>JobTracker address</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapred.job.tracker.http.address</name>
- <!-- cluster variant -->
- <value>localhost:50030</value>
- <description>JobTracker host and http port address</description>
- <final>true</final>
- </property>
-
- <property>
- <!-- cluster specific -->
- <name>mapred.local.dir</name>
- <value>/hadoop/mapred</value>
- <description>The local directory where MapReduce stores intermediate data files. May be a comma-separated list of
- directories on different devices in order to spread disk i/o. Directories that do not exist are ignored.
- </description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.cluster.administrators</name>
- <value> hadoop</value>
- <description>Cluster administrators. Irrespective of the job ACLs configured, cluster administrators always have
- access to view and modify a job.
- </description>
- </property>
-
- <property>
- <name>mapred.reduce.parallel.copies</name>
- <value>30</value>
- <description>The default number of parallel transfers run by reduce
- during the copy(shuffle) phase.
- </description>
- </property>
-
- <property>
- <name>mapred.tasktracker.map.tasks.maximum</name>
- <value>4</value>
- <description>The maximum number of map tasks that will be run simultaneously by a task tracker.</description>
- </property>
-
- <property>
- <name>mapred.tasktracker.reduce.tasks.maximum</name>
- <value>2</value>
- <description>The maximum number of reduce tasks that will be run simultaneously by a task tracker.</description>
- </property>
-
- <property>
- <name>tasktracker.http.threads</name>
- <value>50</value>
- <description>The number of worker threads for the HTTP server. This is used for map output fetching.</description>
- </property>
-
- <property>
- <name>mapred.map.tasks.speculative.execution</name>
- <value>false</value>
- <description>If true, then multiple instances of some map tasks
- may be executed in parallel.</description>
- </property>
-
- <property>
- <name>mapred.reduce.tasks.speculative.execution</name>
- <value>false</value>
- <description>If true, then multiple instances of some reduce tasks
- may be executed in parallel.</description>
- </property>
-
- <property>
- <name>mapred.reduce.slowstart.completed.maps</name>
- <value>0.05</value>
- <description>Fraction of the number of maps in the job which should be complete before reduces are scheduled for the job.</description>
- </property>
-
- <property>
- <name>mapred.inmem.merge.threshold</name>
- <value>1000</value>
- <description>The threshold, in terms of the number of files
- for the in-memory merge process. When we accumulate threshold number of files
- we initiate the in-memory merge and spill to disk. A value of 0 or less
- indicates that we don't want any threshold and instead depend only on
- the ramfs's memory consumption to trigger the merge.
- </description>
- </property>
-
- <property>
- <name>mapred.job.shuffle.merge.percent</name>
- <value>0.66</value>
- <description>The usage threshold at which an in-memory merge will be
- initiated, expressed as a percentage of the total memory allocated to
- storing in-memory map outputs, as defined by
- mapred.job.shuffle.input.buffer.percent.
- </description>
- </property>
-
- <property>
- <name>mapred.job.shuffle.input.buffer.percent</name>
- <value>0.7</value>
- <description>The percentage of memory to be allocated from the maximum heap
- size to storing map outputs during the shuffle.
- </description>
- </property>
-
- <property>
- <name>mapred.map.output.compression.codec</name>
- <value>org.apache.hadoop.io.compress.SnappyCodec</value>
- <description>If the map outputs are compressed, how should they be
- compressed?
- </description>
- </property>
-
- <property>
- <name>mapred.output.compression.type</name>
- <value>BLOCK</value>
- <description>If the job outputs are to be compressed as SequenceFiles, how should
- they be compressed? Should be one of NONE, RECORD or BLOCK.
- </description>
- </property>
-
-
- <property>
- <name>mapred.jobtracker.completeuserjobs.maximum</name>
- <value>0</value>
- <description>The maximum number of complete jobs per user to keep around before delegating them to the job history.</description>
- </property>
-
- <property>
- <name>mapred.jobtracker.taskScheduler</name>
- <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
- <description>The class responsible for scheduling the tasks.</description>
- </property>
-
- <property>
- <name>mapred.jobtracker.restart.recover</name>
- <value>false</value>
- <description>"true" to enable (job) recovery upon restart,
- "false" to start afresh
- </description>
- </property>
-
- <property>
- <name>mapred.job.reduce.input.buffer.percent</name>
- <value>0.0</value>
- <description>The percentage of memory, relative to the maximum heap size, to
- retain map outputs during the reduce. When the shuffle is concluded, any
- remaining map outputs in memory must consume less than this threshold before
- the reduce can begin.
- </description>
- </property>
-
- <property>
- <name>mapreduce.reduce.input.limit</name>
- <value>10737418240</value>
- <description>The limit on the input size of the reduce. (This value
- is 10 GB.) If the estimated input size of the reduce is greater than
- this value, the job fails. A value of -1 means that there is no limit
- set. </description>
- </property>
-
-
- <!-- copied from kryptonite configuration -->
- <property>
- <name>mapred.compress.map.output</name>
- <value></value>
- </property>
-
-
- <property>
- <name>mapred.task.timeout</name>
- <value>600000</value>
- <description>The number of milliseconds before a task will be
- terminated if it neither reads an input, writes an output, nor
- updates its status string.
- </description>
- </property>
-
- <property>
- <name>jetty.connector</name>
- <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
- <description>The connector to be used by Jetty server.</description>
- </property>
-
- <property>
- <name>mapred.task.tracker.task-controller</name>
- <value>org.apache.hadoop.mapred.DefaultTaskController</value>
- <description>
- TaskController which is used to launch and manage task execution.
- </description>
- </property>
-
- <property>
- <name>mapred.child.root.logger</name>
- <value>INFO,TLA</value>
- <description>Logger configuration for the TaskTracker child processes</description>
- </property>
-
- <property>
- <name>ambari.mapred.child.java.opts.memory</name>
- <value>768</value>
- <description>Memory, in MB, substituted into the TaskTracker child Java options (mapred.child.java.opts)</description>
- </property>
-
- <property>
- <name>mapred.child.java.opts</name>
- <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
- <description>Java options for the TaskTracker child processes</description>
- </property>
-
- <property>
- <name>mapred.cluster.map.memory.mb</name>
- <value>1536</value>
- <description>
- The virtual memory size of a single Map slot in the MapReduce framework
- </description>
- </property>
-
- <property>
- <name>mapred.cluster.reduce.memory.mb</name>
- <value>2048</value>
- <description>
- The virtual memory size of a single Reduce slot in the MapReduce framework
- </description>
- </property>
-
- <property>
- <name>mapred.job.map.memory.mb</name>
- <value>1536</value>
- <description>
- Virtual memory for single Map task
- </description>
- </property>
-
- <property>
- <name>mapred.job.reduce.memory.mb</name>
- <value>2048</value>
- <description>
- Virtual memory for single Reduce task
- </description>
- </property>
-
- <property>
- <name>mapred.cluster.max.map.memory.mb</name>
- <value>6144</value>
- <description>
- Upper limit on virtual memory size for a single Map task of any MapReduce job
- </description>
- </property>
-
- <property>
- <name>mapred.cluster.max.reduce.memory.mb</name>
- <value>4096</value>
- <description>
- Upper limit on virtual memory size for a single Reduce task of any MapReduce job
- </description>
- </property>
-
- <property>
- <name>mapred.hosts</name>
- <value>/etc/hadoop/conf/mapred.include</value>
- <description>
- Names a file that contains the list of nodes that may
- connect to the jobtracker. If the value is empty, all hosts are
- permitted.
- </description>
- </property>
-
- <property>
- <name>mapred.hosts.exclude</name>
- <value>/etc/hadoop/conf/mapred.exclude</value>
- <description>
- Names a file that contains the list of hosts that
- should be excluded by the jobtracker. If the value is empty, no
- hosts are excluded.
- </description>
- </property>
-
- <property>
- <name>mapred.max.tracker.blacklists</name>
- <value>16</value>
- <description>
- If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
- </description>
- </property>
-
- <property>
- <name>mapred.healthChecker.script.path</name>
- <value>/etc/hadoop/conf/health_check</value>
- <description>
- Path to the node health check script.
- </description>
- </property>
-
- <property>
- <name>mapred.healthChecker.interval</name>
- <value>135000</value>
- <description>Frequency of the node health script to be run, in milliseconds</description>
- </property>
-
- <property>
- <name>mapred.healthChecker.script.timeout</name>
- <value>60000</value>
- <description>Time after which the node health script will be killed if unresponsive, and the script considered to have failed.</description>
- </property>
-
- <property>
- <name>mapred.job.tracker.persist.jobstatus.active</name>
- <value>false</value>
- <description>Indicates whether persistence of job status information is
- active or not.
- </description>
- </property>
-
- <property>
- <name>mapred.job.tracker.persist.jobstatus.hours</name>
- <value>1</value>
- <description>The number of hours job status information is persisted in DFS.
- The job status information will be available after it drops out of the memory
- queue and between jobtracker restarts. With a zero value the job status
- information is not persisted at all in DFS.
- </description>
- </property>
-
- <property>
- <name>mapred.job.tracker.persist.jobstatus.dir</name>
- <value>/mapred/jobstatus</value>
- <description>The directory where the job status information is persisted
- in a file system to be available after it drops out of the memory queue and
- between jobtracker restarts.
- </description>
- </property>
-
- <property>
- <name>mapred.jobtracker.retirejob.check</name>
- <value>10000</value>
- <description>Interval for the check for jobs to be retired. </description>
- </property>
-
- <property>
- <name>mapred.jobtracker.retirejob.interval</name>
- <value>21600000</value>
- <description> Completed Job retirement interval. </description>
- </property>
-
- <property>
- <name>mapred.job.tracker.history.completed.location</name>
- <value>/mapred/history/done</value>
- <description>The completed job history files are stored at this single well known location.</description>
- </property>
-
- <property>
- <name>mapred.task.maxvmem</name>
- <value></value>
- <final>true</final>
- <description>No description</description>
- </property>
-
- <property>
- <name>mapred.jobtracker.maxtasks.per.job</name>
- <value>-1</value>
- <final>true</final>
- <description>The maximum number of tasks for a single job.
- A value of -1 indicates that there is no maximum. </description>
- </property>
-
- <property>
- <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
- <value>false</value>
- <description>Enable this flag to create a _SUCCESS file for successful jobs.</description>
- </property>
-
- <property>
- <name>mapred.userlog.retain.hours</name>
- <value>24</value>
- <description>
- The maximum time, in hours, for which the user-logs are to be retained after the job completion.
- </description>
- </property>
-
- <property>
- <name>mapred.job.reuse.jvm.num.tasks</name>
- <value>1</value>
- <description>
- How many tasks to run per JVM. If set to -1, there is no limit.
- </description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.jobtracker.kerberos.principal</name>
- <value></value>
- <description>
- JT user name key.
- </description>
- </property>
-
- <property>
- <name>mapreduce.tasktracker.kerberos.principal</name>
- <value></value>
- <description>
- tt user name key. "_HOST" is replaced by the host name of the task tracker.
- </description>
- </property>
-
-
- <property>
- <name>hadoop.job.history.user.location</name>
- <value>none</value>
- <description> Location to store the history files of a particular job. If set to none, then the job histories are
- not collected anywhere outside the master node.
- </description>
- <final>true</final>
- </property>
-
-
- <property>
- <name>mapreduce.jobtracker.keytab.file</name>
- <value></value>
- <description>
- The keytab for the jobtracker principal.
- </description>
-
- </property>
-
- <property>
- <name>mapreduce.tasktracker.keytab.file</name>
- <value></value>
- <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
- <name>mapred.task.tracker.http.address</name>
- <value></value>
- <description>Http address for task tracker.</description>
- </property>
-
- <property>
- <name>mapreduce.jobtracker.staging.root.dir</name>
- <value>/user</value>
- <description>The path prefix under which the staging directories are placed. The next level is always the user's
- name. It is a path in the default file system.</description>
- </property>
-
- <property>
- <name>mapreduce.tasktracker.group</name>
- <value>hadoop</value>
- <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
-
- </property>
-
- <property>
- <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
- <value>50000000</value>
- <final>true</final>
- <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
- initialization.
- </description>
- </property>
- <property>
- <name>mapreduce.history.server.embedded</name>
- <value>false</value>
- <description>Should the job history server be embedded within the JobTracker
- process?</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.history.server.http.address</name>
- <!-- cluster variant -->
- <value>localhost:51111</value>
- <description>Http address of the history server</description>
- <final>true</final>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.kerberos.principal</name>
- <!-- cluster variant -->
- <value></value>
- <description>Job history user name key. (must map to same user as JT
- user)</description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.keytab.file</name>
- <!-- cluster variant -->
- <value></value>
- <description>The keytab for the job history server principal.</description>
- </property>
-
- <property>
- <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
- <value>180</value>
- <description>
- 3-hour sliding window (value is in minutes)
- </description>
- </property>
-
- <property>
- <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
- <value>15</value>
- <description>
- 15-minute bucket size (value is in minutes)
- </description>
- </property>
-
- <property>
- <name>mapred.queue.names</name>
- <value>default</value>
- <description>Comma-separated list of queues configured for this jobtracker.</description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.intermediate-done-dir</name>
- <value>/mr-history/tmp</value>
- <description>
- Directory where history files are written by MapReduce jobs.
- </description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.done-dir</name>
- <value>/mr-history/done</value>
- <description>
- Directory where history files are managed by the MR JobHistory Server.
- </description>
- </property>
-
- <property>
- <name>mapreduce.jobhistory.webapp.address</name>
- <value>localhost:19888</value>
- <description>Enter your JobHistoryServer hostname.</description>
- </property>
-
-</configuration>
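
The stack scripts removed later in this commit read these mapred-site values out of the
command configuration dictionary supplied by the Ambari agent (see params.py below, which
does config['configurations']['mapred-site'][...]). A minimal standalone sketch of that
access pattern; the sample dictionary is hypothetical, not real agent output:

    # Hypothetical stand-in for the structure returned by Script.get_config().
    config = {
        'configurations': {
            'mapred-site': {
                'mapred.local.dir': '/hadoop/mapred',
                'mapred.hosts.exclude': '/etc/hadoop/conf/mapred.exclude',
            }
        }
    }

    # Same lookups as params.py in this commit.
    mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
    exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
    print(mapred_local_dir, exclude_file_path)
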
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
deleted file mode 100644
index 09c9c4f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapreduce-log4j.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-
- <property>
- <name>hadoop.mapreduce.jobsummary.logger</name>
- <value>${hadoop.root.logger}</value>
- </property>
-
- <property>
- <name>hadoop.mapreduce.jobsummary.log.file</name>
- <value>hadoop-mapreduce.jobsummary.log</value>
- </property>
-
- <property>
- <name>log4j.appender.JSA</name>
- <value>org.apache.log4j.DailyRollingFileAppender</value>
- </property>
-
- <property>
- <name>log4j.appender.JSA.layout.ConversionPattern</name>
- <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
- </property>
-
- <property>
- <name>log4j.appender.JSA.DatePattern</name>
- <value>.yyyy-MM-dd</value>
- </property>
-
- <property>
- <name>log4j.appender.JSA.layout</name>
- <value>org.apache.log4j.PatternLayout</value>
- </property>
-
- <!--Only for HDP1.0 below-->
- <property>
- <name>log4j.appender.JSA.File</name>
- <value>/var/log/hadoop/mapred/${hadoop.mapreduce.jobsummary.log.file}</value>
- </property>
-
- <property>
- <name>log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
- <value>${hadoop.mapreduce.jobsummary.logger}</value>
- </property>
-
- <property>
- <name>log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary</name>
- <value>false</value>
- </property>
-
- <!--Only for HDP1.0 above-->
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 643b64c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<metainfo>
- <schemaVersion>2.0</schemaVersion>
- <services>
- <service>
- <name>MAPREDUCE</name>
- <comment>Apache Hadoop Distributed Processing Framework</comment>
- <version>1.2.0.1.3.3.0</version>
- <components>
- <component>
- <name>JOBTRACKER</name>
- <category>MASTER</category>
- <cardinality>1</cardinality>
- <commandScript>
- <script>scripts/jobtracker.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- <customCommands>
- <customCommand>
- <name>DECOMMISSION</name>
- <commandScript>
- <script>scripts/jobtracker.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </customCommand>
- </customCommands>
- </component>
-
- <component>
- <name>TASKTRACKER</name>
- <category>SLAVE</category>
- <cardinality>1+</cardinality>
- <commandScript>
- <script>scripts/tasktracker.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </component>
-
- <component>
- <name>MAPREDUCE_CLIENT</name>
- <category>CLIENT</category>
- <cardinality>0+</cardinality>
- <commandScript>
- <script>scripts/client.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </component>
-
- <component>
- <name>HISTORYSERVER</name>
- <category>MASTER</category>
- <cardinality>1</cardinality>
- <auto-deploy>
- <enabled>true</enabled>
- <co-locate>MAPREDUCE/JOBTRACKER</co-locate>
- </auto-deploy>
- <commandScript>
- <script>scripts/historyserver.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </component>
- </components>
-
- <commandScript>
- <script>scripts/service_check.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>300</timeout>
- </commandScript>
-
- <configuration-dependencies>
- <config-type>capacity-scheduler</config-type>
- <config-type>core-site</config-type>
- <config-type>global</config-type>
- <config-type>mapred-site</config-type>
- <config-type>mapred-queue-acls</config-type>
- <config-type>mapreduce-log4j</config-type>
- </configuration-dependencies>
- </service>
-
- </services>
-</metainfo>
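
Each <commandScript> above names a resource_management Script subclass that the Ambari
agent runs for the matching lifecycle command; the component scripts removed further down
in this commit all follow the same shape. A minimal sketch (the class name and pid-file
attribute are illustrative, not part of the commit):

    from resource_management import *

    class MyComponent(Script):
      def install(self, env):
        self.install_packages(env)
        self.configure(env)

      def configure(self, env):
        import params
        env.set_params(params)  # expose params.py values to resources and templates

      def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.mycomponent_pid_file)  # hypothetical attribute

    if __name__ == "__main__":
      MyComponent().execute()
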
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
deleted file mode 100644
index 79c644d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Client(Script):
-
- def install(self, env):
- self.install_packages(env)
- self.configure(env)
-
- def configure(self, env):
- import params
- env.set_params(params)
- mapreduce()
-
- def status(self, env):
- raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
- Client().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
deleted file mode 100644
index 972a767..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Historyserver(Script):
- def install(self, env):
- self.install_packages(env)
- self.configure(env)
-
- def configure(self, env):
- import params
- env.set_params(params)
- mapreduce()
-
- def start(self, env):
- import params
- env.set_params(params)
- self.configure(env)
- service('historyserver',
- action='start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- service('historyserver',
- action='stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- check_process_status(status_params.historyserver_pid_file)
-
-if __name__ == "__main__":
- Historyserver().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
deleted file mode 100644
index 5cd41ae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Jobtracker(Script):
- def install(self, env):
- self.install_packages(env)
- self.configure(env)
-
- def configure(self, env):
- import params
- env.set_params(params)
- mapreduce()
-
- def start(self, env):
- import params
- env.set_params(params)
- self.configure(env) # FOR SECURITY
- service('jobtracker',
- action='start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- service('jobtracker',
- action='stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- check_process_status(status_params.jobtracker_pid_file)
- pass
-
- def decommission(self, env):
- import params
-
- env.set_params(params)
-
- mapred_user = params.mapred_user
- conf_dir = params.conf_dir
- user_group = params.user_group
-
- File(params.exclude_file_path,
- content=Template("exclude_hosts_list.j2"),
- owner=mapred_user,
- group=user_group
- )
-
- ExecuteHadoop('mradmin -refreshNodes',
- user=mapred_user,
- conf_dir=conf_dir,
- kinit_override=True)
- pass
-
-if __name__ == "__main__":
- Jobtracker().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
deleted file mode 100644
index c5fd002..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def mapreduce():
- import params
-
- Directory([params.mapred_pid_dir,params.mapred_log_dir],
- owner=params.mapred_user,
- group=params.user_group,
- recursive=True
- )
-
- Directory(params.mapred_local_dir,
- owner=params.mapred_user,
- mode=0755,
- recursive=True
- )
-
- File(params.exclude_file_path,
- owner=params.mapred_user,
- group=params.user_group,
- )
-
- File(params.mapred_hosts_file_path,
- owner=params.mapred_user,
- group=params.user_group,
- )
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
deleted file mode 100644
index d722124..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hadoop/conf"
-
-mapred_user = status_params.mapred_user
-pid_dir_prefix = status_params.pid_dir_prefix
-mapred_pid_dir = status_params.mapred_pid_dir
-
-historyserver_pid_file = status_params.historyserver_pid_file
-jobtracker_pid_file = status_params.jobtracker_pid_file
-tasktracker_pid_file = status_params.tasktracker_pid_file
-
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['global']['user_group']
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-mapred_log_dir = format("{hdfs_log_dir_prefix}/{mapred_user}")
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#exclude file
-mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
-exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
-mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
deleted file mode 100644
index f4aa91b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(
- name,
- action='start'):
-
- import params
-
- pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{name}.pid")
- hadoop_daemon = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {hadoop_bin}/hadoop-daemon.sh")
- cmd = format("{hadoop_daemon} --config {conf_dir}")
-
- if action == 'start':
- daemon_cmd = format("{cmd} start {name}")
- no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
- Execute(daemon_cmd,
- user=params.mapred_user,
- not_if=no_op
- )
-
- Execute(no_op,
- user=params.mapred_user,
- not_if=no_op,
- initial_wait=5
- )
- elif action == 'stop':
- daemon_cmd = format("{cmd} stop {name}")
- rm_pid = format("rm -f {pid_file}")
-
- Execute(daemon_cmd,
- user=params.mapred_user
- )
- Execute(rm_pid)
\ No newline at end of file
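
For reference, when Jobtracker.start() above calls service('jobtracker', action='start'),
the format() strings here combine with the defaults in params.py (hadoop_libexec_dir of
/usr/lib/hadoop/libexec, hadoop_bin of /usr/lib/hadoop/bin, conf_dir of /etc/hadoop/conf)
into roughly "export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec &&
/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start jobtracker", run as
the mapred user and skipped when the pid file already points at a live process. This is a
reading of the format strings, not captured output.
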
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
deleted file mode 100644
index c0a4a59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
- def service_check(self, env):
- import params
- env.set_params(params)
-
- jar_location = params.hadoop_jar_location
- input_file = 'mapredsmokeinput'
- output_file = "mapredsmokeoutput"
-
- cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
- create_file_cmd = format("{cleanup_cmd} ; hadoop dfs -put /etc/passwd {input_file}")
- test_cmd = format("fs -test -e {output_file}")
- run_wordcount_job = format("jar {jar_location}/hadoop-examples.jar wordcount {input_file} {output_file}")
-
- if params.security_enabled:
- kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
- Execute(kinit_cmd,
- user=params.smokeuser
- )
-
- ExecuteHadoop(create_file_cmd,
- tries=1,
- try_sleep=5,
- user=params.smokeuser,
- conf_dir=params.conf_dir
- )
-
- ExecuteHadoop(run_wordcount_job,
- tries=1,
- try_sleep=5,
- user=params.smokeuser,
- conf_dir=params.conf_dir,
- logoutput=True
- )
-
- ExecuteHadoop(test_cmd,
- user=params.smokeuser,
- conf_dir=params.conf_dir
- )
-
-if __name__ == "__main__":
- ServiceCheck().execute()
\ No newline at end of file
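
In effect the smoke test stages /etc/passwd into HDFS as mapredsmokeinput (after removing
any leftover smoke files), runs the wordcount example from hadoop-examples.jar against it,
and checks that mapredsmokeoutput exists; roughly the equivalent of running
hadoop dfs -put /etc/passwd mapredsmokeinput, then
hadoop jar /usr/lib/hadoop/hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput,
then hadoop fs -test -e mapredsmokeoutput, all as the smoke user, preceded by a kinit when
security is enabled. (A paraphrase of the commands above, not captured output.)
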
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
deleted file mode 100644
index f964a76..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['global']['mapred_user']
-pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
-
-jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")
-tasktracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-tasktracker.pid")
-historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
deleted file mode 100644
index 77d974b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Tasktracker(Script):
- def install(self, env):
- self.install_packages(env)
- self.configure(env)
-
- def configure(self, env):
- import params
- env.set_params(params)
- mapreduce()
-
- def start(self, env):
- import params
- env.set_params(params)
- self.configure(env) # FOR SECURITY
- service('tasktracker',
- action='start'
- )
-
- def stop(self, env):
- import params
- env.set_params(params)
-
- service('tasktracker',
- action='stop'
- )
-
- def status(self, env):
- import status_params
- env.set_params(status_params)
- check_process_status(status_params.tasktracker_pid_file)
-
-if __name__ == "__main__":
- Tasktracker().execute()
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index 02fc5fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in mr_exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file
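
To see what this template yields when params.mr_exclude_hosts (the
/clusterHostInfo/decom_tt_hosts list) is rendered into the exclude file, here is a
standalone sketch using the jinja2 library directly; the host names are invented and
trim_blocks is assumed to approximate the agent's Template rendering:

    from jinja2 import Template

    template_src = (
        "{% for host in mr_exclude_hosts %}\n"
        "{{host}}\n"
        "{% endfor %}"
    )

    # Prints one excluded TaskTracker host per line.
    print(Template(template_src, trim_blocks=True).render(
        mr_exclude_hosts=["tt1.example.com", "tt2.example.com"]))
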
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
- <property>
- <name>nagios_user</name>
- <value>nagios</value>
- <description>Nagios Username.</description>
- </property>
- <property>
- <name>nagios_group</name>
- <value>nagios</value>
- <description>Nagios Group.</description>
- </property>
- <property>
- <name>nagios_web_login</name>
- <value>nagiosadmin</value>
- <description>Nagios web user.</description>
- </property>
- <property>
- <name>nagios_web_password</name>
- <value></value>
- <description>Nagios Admin Password.</description>
- </property>
- <property>
- <name>nagios_contact</name>
- <value></value>
- <description>Hadoop Admin Email.</description>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a4c500d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<metainfo>
- <schemaVersion>2.0</schemaVersion>
- <services>
- <service>
- <name>NAGIOS</name>
- <comment>Nagios Monitoring and Alerting system</comment>
- <version>3.5.0</version>
- <components>
- <component>
- <name>NAGIOS_SERVER</name>
- <category>MASTER</category>
- <cardinality>1</cardinality>
- <commandScript>
- <script>scripts/nagios_server.py</script>
- <scriptType>PYTHON</scriptType>
- <timeout>600</timeout>
- </commandScript>
- </component>
- </components>
- <osSpecifics>
- <osSpecific>
- <osType>any</osType>
- <packages>
- <package>
- <type>rpm</type>
- <name>perl</name>
- </package>
- <package>
- <type>rpm</type>
- <name>perl-Net-SNMP</name>
- </package>
- <package>
- <type>rpm</type>
- <name>nagios-plugins-1.4.9</name>
- </package>
- <package>
- <type>rpm</type>
- <name>nagios-3.5.0-99</name>
- </package>
- <package>
- <type>rpm</type>
- <name>nagios-www-3.5.0-99</name>
- </package>
- <package>
- <type>rpm</type>
- <name>nagios-devel-3.5.0-99</name>
- </package>
- <package>
- <type>rpm</type>
- <name>fping</name>
- </package>
- <package>
- <type>rpm</type>
- <name>hdp_mon_nagios_addons</name>
- </package>
- </packages>
- </osSpecific>
- <osSpecific>
- <osType>suse</osType>
- <package>
- <type>rpm</type>
- <name>php5-json</name>
- </package>
- </osSpecific>
- <osSpecific>
- <osType>centos5</osType>
- <package>
- <type>rpm</type>
- <name>php-pecl-json.x86_64</name>
- </package>
- </osSpecific>
- <osSpecific>
- <osType>redhat5</osType>
- <package>
- <type>rpm</type>
- <name>php-pecl-json.x86_64</name>
- </package>
- </osSpecific>
- <osSpecific>
- <osType>oraclelinux5</osType>
- <package>
- <type>rpm</type>
- <name>php-pecl-json.x86_64</name>
- </package>
- </osSpecific>
- </osSpecifics>
- </service>
- </services>
-</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- $options = getopt ("f:s:n:w:c:t:");
- if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
- || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
- usage();
- exit(3);
- }
- $status_file=$options['f'];
- $status_code=$options['s'];
- $type=$options['t'];
- $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
- $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
- if ($type == "service" && !array_key_exists('n', $options)) {
- echo "Service description not provided -n option\n";
- exit(3);
- }
- if ($type == "service") {
- $service_name=$options['n'];
- /* echo "DESC: " . $service_name . "\n"; */
- }
-
- $result = array();
- $status_file_content = file_get_contents($status_file);
-
- $counts;
- if ($type == "service") {
- $counts=query_alert_count($status_file_content, $service_name, $status_code);
- } else {
- $counts=query_host_count($status_file_content, $status_code);
- }
-
- if ($counts['total'] == 0) {
- $percent = 0;
- } else {
- $percent = ($counts['actual']/$counts['total'])*100;
- }
- if ($percent >= $crit) {
- echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
- exit (2);
- }
- if ($percent >= $warn) {
- echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
- exit (1);
- }
- echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
- exit(0);
-
-
- # Functions
- /* print usage */
- function usage () {
- echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
- }
-
- /* Query host count */
- function query_host_count ($status_file_content, $status_code) {
- $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
- $hostcounts_object = array ();
- $total_hosts = 0;
- $hosts = 0;
- foreach ($matches[0] as $object) {
- $total_hosts++;
- if (getParameter($object, "current_state") == $status_code) {
- $hosts++;
- }
- }
- $hostcounts_object['total'] = $total_hosts;
- $hostcounts_object['actual'] = $hosts;
- return $hostcounts_object;
- }
-
- /* Query Alert counts */
- function query_alert_count ($status_file_content, $service_name, $status_code) {
- $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
- $alertcounts_objects = array ();
- $total_alerts=0;
- $alerts=0;
- foreach ($matches[0] as $object) {
- if (getParameter($object, "service_description") == $service_name) {
- $total_alerts++;
- if (getParameter($object, "current_state") >= $status_code) {
- $alerts++;
- }
- }
- }
- $alertcounts_objects['total'] = $total_alerts;
- $alertcounts_objects['actual'] = $alerts;
- return $alertcounts_objects;
- }
-
- function get_service_type($service_description)
- {
- $pieces = explode("::", $service_description);
- switch ($pieces[0]) {
- case "NAMENODE":
- $pieces[0] = "HDFS";
- break;
- case "JOBTRACKER":
- $pieces[0] = "MAPREDUCE";
- break;
- case "HBASEMASTER":
- $pieces[0] = "HBASE";
- break;
- case "SYSTEM":
- case "HDFS":
- case "MAPREDUCE":
- case "HBASE":
- break;
- default:
- $pieces[0] = "UNKNOWN";
- }
- return $pieces[0];
- }
-
- function getParameter($object, $key)
- {
- $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
- $num_mat = preg_match($pattern, $object, $matches);
- $value = "";
- if ($num_mat) {
- $value = $matches[1];
- }
- return $value;
- }
-
-function indent($json) {
-
- $result = '';
- $pos = 0;
- $strLen = strlen($json);
- $indentStr = ' ';
- $newLine = "\n";
- $prevChar = '';
- $outOfQuotes = true;
-
- for ($i=0; $i<=$strLen; $i++) {
-
- // Grab the next character in the string.
- $char = substr($json, $i, 1);
-
- // Are we inside a quoted string?
- if ($char == '"' && $prevChar != '\\') {
- $outOfQuotes = !$outOfQuotes;
-
- // If this character is the end of an element,
- // output a new line and indent the next line.
- } else if(($char == '}' || $char == ']') && $outOfQuotes) {
- $result .= $newLine;
- $pos --;
- for ($j=0; $j<$pos; $j++) {
- $result .= $indentStr;
- }
- }
-
- // Add the character to the result string.
- $result .= $char;
-
- // If the last character was the beginning of an element,
- // output a new line and indent the next line.
- if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
- $result .= $newLine;
- if ($char == '{' || $char == '[') {
- $pos ++;
- }
-
- for ($j = 0; $j < $pos; $j++) {
- $result .= $indentStr;
- }
- }
-
- $prevChar = $char;
- }
-
- return $result;
-}
-
-/* JSON document format */
-/*
-{
- "programstatus":{
- "last_command_check":"1327385743"
- },
- "hostcounts":{
- "up_nodes":"",
- "down_nodes":""
- },
- "hoststatus":[
- {
- "host_name"="ip-10-242-191-48.ec2.internal",
- "current_state":"0",
- "last_hard_state":"0",
- "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
- "last_check":"1327385564",
- "current_attempt":"1",
- "last_hard_state_change":"1327362079",
- "last_time_up":"1327385574",
- "last_time_down":"0",
- "last_time_unreachable":"0",
- "is_flapping":"0",
- "last_check":"1327385574",
- "servicestatus":[
- ]
- }
- ],
- "servicestatus":[
- {
- "service_type":"HDFS", {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
- "service_description":"HDFS Current Load",
- "host_name"="ip-10-242-191-48.ec2.internal",
- "current_attempt":"1",
- "current_state":"0",
- "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
- "last_hard_state_change":"1327362079",
- "last_time_ok":"1327385479",
- "last_time_warning":"0",
- "last_time_unknown":"0",
- "last_time_critical":"0",
- "last_check":"1327385574",
- "is_flapping":"0"
- }
- ]
-}
-*/
-
-?>
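For orientation, a minimal invocation sketch for check_aggregate.php based on its usage() string above; the plugin path, status-file location, service description, and thresholds are illustrative assumptions rather than values taken from this stack definition:

# Hypothetical example: raise WARNING when >=10% and CRITICAL when >=30% of the
# Nagios service checks named below are at state 1 (WARNING) or worse.
php /usr/lib64/nagios/plugins/check_aggregate.php \
  -f /var/nagios/status.dat \
  -t service \
  -n "DATANODE::Process down" \
  -s 1 \
  -w 10% -c 30%
# Exit codes follow Nagios conventions: 0=OK, 1=WARNING, 2=CRITICAL, 3=usage error.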
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2";
-my $o_host = undef;
-my $o_community = undef;
-my $o_warn= undef;
-my $o_crit= undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
- print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
- 'H:s' => \$o_host,
- 'C:s' => \$o_community,
- 'c:s' => \$o_crit,
- 'w:s' => \$o_warn
- );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
- Usage();
- exit 3;
-}
-$o_warn =~ s/\%//g;
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
- -hostname => $o_host,
- -community => $o_community,
- -port => $o_port,
- -timeout => $o_timeout
- );
-if (!defined($session)) {
- printf("Error opening session: %s.\n", $error);
- exit 3;
-}
-
-my $exit_val=undef;
-my $resultat = (Net::SNMP->VERSION < 4) ?
- $session->get_table($base_proc)
- : $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
- printf("ERROR: Description table : %s.\n", $session->error);
- $session->close;
- exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
- if ($key =~ /$proc_load/) {
- $cpu_used += $$resultat{$key};
- $ncpu++;
- }
-}
-
-if ($ncpu==0) {
- print "Can't find CPU usage information : UNKNOWN\n";
- exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
- if ($cpu_used > $o_warn) {
- print " > $o_warn% : WARNING\n";
- $exit_val=1;
- }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
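For orientation, a minimal invocation sketch for check_cpu.pl matching its Usage() string; the hostname, SNMP community string, and thresholds are placeholders:

# Hypothetical example: poll hrProcessorLoad (OID 1.3.6.1.2.1.25.3.3.1.2) over
# SNMP on UDP port 161, warning above 80% average load and critical above 90%.
perl check_cpu.pl -H dn01.example.com -C public -w 80% -c 90%
# Sample output when healthy: "8 CPU, average load 12.3% < 80% : OK" (exit 0);
# exit 3 is returned when the host or its SNMP agent cannot be reached.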
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document, and
- * checks the storage capacity remaining on local datanode storage.
- */
-
- include "hdp_nagios_init.php";
-
- $options = getopt ("h:p:w:c:e:k:r:t:s:");
- if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
- || !array_key_exists('c', $options)) {
- usage();
- exit(3);
- }
-
- $host=$options['h'];
- $port=$options['p'];
- $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
- $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
- $keytab_path=$options['k'];
- $principal_name=$options['r'];
- $kinit_path_local=$options['t'];
- $security_enabled=$options['s'];
- $ssl_enabled=$options['e'];
-
- /* Kinit if security enabled */
- $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
- $retcode = $status[0];
- $output = $status[1];
-
- if ($output != 0) {
- echo "CRITICAL: Error doing kinit for nagios. $output";
- exit (2);
- }
-
- $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
- /* Get the json document */
- $ch = curl_init();
- $username = rtrim(`id -un`, "\n");
- curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
- CURLOPT_RETURNTRANSFER => true,
- CURLOPT_HTTPAUTH => CURLAUTH_ANY,
- CURLOPT_USERPWD => "$username:",
- CURLOPT_SSL_VERIFYPEER => FALSE ));
- $json_string = curl_exec($ch);
- $info = curl_getinfo($ch);
- if (intval($info['http_code']) == 401){
- logout();
- $json_string = curl_exec($ch);
- }
- $info = curl_getinfo($ch);
- curl_close($ch);
- $json_array = json_decode($json_string, true);
- $object = $json_array['beans'][0];
- $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
- $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
- if (count($object) == 0) {
- echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
- exit(2);
- }
- $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
- $out_msg = "Capacity:[" . $cap_total .
- "], Remaining Capacity:[" . $cap_remain .
- "], percent_full:[" . $percent_full . "]";
-
- if ($percent_full > $crit) {
- echo "CRITICAL: " . $out_msg . "\n";
- exit (2);
- }
- if ($percent_full > $warn) {
- echo "WARNING: " . $out_msg . "\n";
- exit (1);
- }
- echo "OK: " . $out_msg . "\n";
- exit(0);
-
- /* print usage */
- function usage () {
- echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
- }
-?>
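For orientation, a minimal invocation sketch for check_datanode_storage.php matching its usage() string; the host, port, and thresholds are placeholders, and the keytab/principal/kinit arguments shown assume a non-secure cluster where they are effectively ignored:

# Hypothetical example, non-secure and non-SSL: warn at 75% of the DataNode's
# capacity used, critical at 90%; 50075 is the usual DataNode HTTP port in HDP 1.x.
php check_datanode_storage.php -h dn01.example.com -p 50075 -w 75% -c 90% \
  -e false -s false -k /etc/security/keytabs/nagios.service.keytab \
  -r nagios@EXAMPLE.COM -t /usr/bin/kinit
# The plugin computes percent_full from the Remaining and Capacity attributes of
# the Hadoop:service=DataNode,name=FSDatasetState-* JMX bean.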
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document, and
- * checks whether the corrupt or missing blocks % is > threshold.
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
- include "hdp_nagios_init.php";
-
- $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
- if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
- || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
- usage();
- exit(3);
- }
-
- $hosts=$options['h'];
- $port=$options['p'];
- $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
- $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
- $nn_jmx_property=$options['s'];
- $keytab_path=$options['k'];
- $principal_name=$options['r'];
- $kinit_path_local=$options['t'];
- $security_enabled=$options['u'];
- $ssl_enabled=$options['e'];
-
- /* Kinit if security enabled */
- $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
- $retcode = $status[0];
- $output = $status[1];
-
- if ($output != 0) {
- echo "CRITICAL: Error doing kinit for nagios. $output";
- exit (2);
- }
-
- $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
- foreach (preg_split('/,/', $hosts) as $host) {
- /* Get the json document */
-
- $ch = curl_init();
- $username = rtrim(`id -un`, "\n");
- curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
- CURLOPT_RETURNTRANSFER => true,
- CURLOPT_HTTPAUTH => CURLAUTH_ANY,
- CURLOPT_USERPWD => "$username:",
- CURLOPT_SSL_VERIFYPEER => FALSE ));
- $json_string = curl_exec($ch);
- $info = curl_getinfo($ch);
- if (intval($info['http_code']) == 401){
- logout();
- $json_string = curl_exec($ch);
- }
- $info = curl_getinfo($ch);
- curl_close($ch);
- $json_array = json_decode($json_string, true);
- $m_percent = 0;
- $c_percent = 0;
- $object = $json_array['beans'][0];
- $missing_blocks = $object['MissingBlocks'];
- $corrupt_blocks = $object['CorruptBlocks'];
- $total_blocks = $object['BlocksTotal'];
- if (count($object) == 0) {
- echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
- exit(2);
- }
- if($total_blocks == 0) {
- $m_percent = 0;
- $c_percent = 0;
- } else {
- $m_percent = ($missing_blocks/$total_blocks)*100;
- $c_percent = ($corrupt_blocks/$total_blocks)*100;
- break;
- }
- }
- $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
- ">, missing_blocks:<" . $missing_blocks .
- ">, total_blocks:<" . $total_blocks . ">";
-
- if ($m_percent > $crit || $c_percent > $crit) {
- echo "CRITICAL: " . $out_msg . "\n";
- exit (2);
- }
- if ($m_percent > $warn || $c_percent > $warn) {
- echo "WARNING: " . $out_msg . "\n";
- exit (1);
- }
- echo "OK: " . $out_msg . "\n";
- exit(0);
-
- /* print usage */
- function usage () {
- echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
- }
-?>
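For orientation, a minimal invocation sketch for check_hdfs_blocks.php; note that security-enabled is passed with -u (as read by getopt above), and the host list, port, bean name, and keytab/principal/kinit values below are placeholders:

# Hypothetical example, non-secure: use the 1% thresholds from the comment above
# and query each listed NameNode until block counts are obtained.
php check_hdfs_blocks.php -h nn01.example.com,nn02.example.com -p 50070 \
  -w 1% -c 1% -s FSNamesystemMetrics -u false -e false \
  -k /etc/security/keytabs/nagios.service.keytab -r nagios@EXAMPLE.COM -t /usr/bin/kinit
# Alerts are raised when missing or corrupt blocks exceed the given percentage
# of BlocksTotal reported by the NameNode JMX bean.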
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document, and
- * checks whether the % of HDFS capacity used is >= the warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
- include "hdp_nagios_init.php";
-
- $options = getopt ("h:p:w:c:e:k:r:t:s:");
- if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
- || !array_key_exists('c', $options)) {
- usage();
- exit(3);
- }
-
- $hosts=$options['h'];
- $port=$options['p'];
- $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
- $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
- $keytab_path=$options['k'];
- $principal_name=$options['r'];
- $kinit_path_local=$options['t'];
- $security_enabled=$options['s'];
- $ssl_enabled=$options['e'];
-
- /* Kinit if security enabled */
- $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
- $retcode = $status[0];
- $output = $status[1];
-
- if ($output != 0) {
- echo "CRITICAL: Error doing kinit for nagios. $output";
- exit (2);
- }
-
- $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
- foreach (preg_split('/,/', $hosts) as $host) {
- /* Get the json document */
- $ch = curl_init();
- $username = rtrim(`id -un`, "\n");
- curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
- CURLOPT_RETURNTRANSFER => true,
- CURLOPT_HTTPAUTH => CURLAUTH_ANY,
- CURLOPT_USERPWD => "$username:",
- CURLOPT_SSL_VERIFYPEER => FALSE ));
- $json_string = curl_exec($ch);
- $info = curl_getinfo($ch);
- if (intval($info['http_code']) == 401){
- logout();
- $json_string = curl_exec($ch);
- }
- $info = curl_getinfo($ch);
- curl_close($ch);
- $json_array = json_decode($json_string, true);
- $percent = 0;
- $object = $json_array['beans'][0];
- $CapacityUsed = $object['CapacityUsed'];
- $CapacityRemaining = $object['CapacityRemaining'];
- if (count($object) == 0) {
- echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
- exit(2);
- }
- $CapacityTotal = $CapacityUsed + $CapacityRemaining;
- if($CapacityTotal == 0) {
- $percent = 0;
- } else {
- $percent = ($CapacityUsed/$CapacityTotal)*100;
- break;
- }
- }
- $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
- ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
- if ($percent >= $crit) {
- echo "CRITICAL: " . $out_msg . "\n";
- exit (2);
- }
- if ($percent >= $warn) {
- echo "WARNING: " . $out_msg . "\n";
- exit (1);
- }
- echo "OK: " . $out_msg . "\n";
- exit(0);
-
- /* print usage */
- function usage () {
- echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
- }
-?>
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
- NAGIOS_KEYTAB=$5
- NAGIOS_USER=$6
- KINIT_PATH=$7
- out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
- if [[ "$?" -ne 0 ]]; then
- echo "CRITICAL: Error doing kinit for nagios [$out1]";
- exit 2;
- fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
- echo "CRITICAL: Error accessing Hive Metastore status [$out]";
- exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;
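For orientation, a minimal invocation sketch for check_hive_metastore_status.sh; arguments are positional (HOST PORT JAVA_HOME SEC_ENABLED, plus keytab, principal, and kinit path when security is enabled), and the host, port, and JAVA_HOME below are placeholders:

# Hypothetical example on a non-secure cluster; 9083 is the usual Thrift
# metastore port.
bash check_hive_metastore_status.sh metastore01.example.com 9083 /usr/jdk64/jdk1.6.0_31 false
# The script runs `hcat -Dhive.metastore.uris=thrift://HOST:PORT -e "show databases"`
# and prints "OK: Hive Metastore status OK" (exit 0) when the metastore responds.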
http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
- echo "WARNING: Hue is stopped";
- exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;