Posted to commits@mesos.apache.org by be...@apache.org on 2012/05/01 01:28:44 UTC

svn commit: r1332469 [1/3] - in /incubator/mesos/trunk: ./ ec2/ hadoop/ hadoop/mesos/ hadoop/mesos/ivy/ hadoop/mesos/src/ hadoop/mesos/src/java/ hadoop/mesos/src/java/org/ hadoop/mesos/src/java/org/apache/ hadoop/mesos/src/java/org/apache/hadoop/ hadoo...

Author: benh
Date: Mon Apr 30 23:28:43 2012
New Revision: 1332469

URL: http://svn.apache.org/viewvc?rev=1332469&view=rev
Log:
Updated the Hadoop tutorial, including adding support for cdh3u3.

Added:
    incubator/mesos/trunk/ec2/Makefile.am
    incubator/mesos/trunk/hadoop/Makefile.am
    incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3.patch
    incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch
    incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_mesos.patch
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0_hadoop-env.sh.patch
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0_mesos.patch
    incubator/mesos/trunk/hadoop/mapred-site.xml.patch
      - copied, changed from r1332468, incubator/mesos/trunk/hadoop/hadoop-0.20.205.0_conf_mapred-site.xml.patch
    incubator/mesos/trunk/hadoop/mesos/
    incubator/mesos/trunk/hadoop/mesos-executor   (with props)
    incubator/mesos/trunk/hadoop/mesos/build.xml
    incubator/mesos/trunk/hadoop/mesos/ivy/
    incubator/mesos/trunk/hadoop/mesos/ivy.xml
    incubator/mesos/trunk/hadoop/mesos/ivy/libraries.properties
    incubator/mesos/trunk/hadoop/mesos/src/
    incubator/mesos/trunk/hadoop/mesos/src/java/
    incubator/mesos/trunk/hadoop/mesos/src/java/org/
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/FrameworkExecutor.java
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/FrameworkScheduler.java
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/HadoopFrameworkMessage.java
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/MesosScheduler.java
    incubator/mesos/trunk/hadoop/mesos/src/java/org/apache/hadoop/mapred/MesosTaskTrackerInstrumentation.java
Removed:
    incubator/mesos/trunk/hadoop/TUTORIAL
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0.tar.gz
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0_conf_hadoop-env.sh.patch
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0_conf_mapred-site.xml.patch
Modified:
    incubator/mesos/trunk/Makefile.am
    incubator/mesos/trunk/README
    incubator/mesos/trunk/configure.ac
    incubator/mesos/trunk/hadoop/TUTORIAL.sh
    incubator/mesos/trunk/hadoop/hadoop-0.20.205.0.patch

Modified: incubator/mesos/trunk/Makefile.am
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/Makefile.am?rev=1332469&r1=1332468&r2=1332469&view=diff
==============================================================================
--- incubator/mesos/trunk/Makefile.am (original)
+++ incubator/mesos/trunk/Makefile.am Mon Apr 30 23:28:43 2012
@@ -18,11 +18,11 @@ ACLOCAL_AMFLAGS = -I m4
 
 AUTOMAKE_OPTIONS = foreign
 
-SUBDIRS = . third_party src
+SUBDIRS = . third_party src ec2 hadoop
 
 EXTRA_DIST =
 
-PHONY_TARGETS = # Initialized to empty.
+PHONY_TARGETS =
 
 
 # Since we generate several files in src/ with config.status, make
@@ -31,7 +31,7 @@ all-recursive: src/python/setup.py src/j
 
 
 # Standard stuff.
-EXTRA_DIST += bootstrap DISCLAIMER LICENSE NOTICE README support/colors.sh
+EXTRA_DIST += bootstrap LICENSE NOTICE README support/colors.sh
 
 
 # Extra configure scripts.
@@ -39,30 +39,6 @@ EXTRA_DIST += configure.amazon-linux-64 
   configure.macosx configure.ubuntu-lucid-64 configure.ubuntu-natty-64
 
 
-# We include support for Hadoop on Mesos in the distribution.
-EXTRA_DIST += hadoop/TUTORIAL.sh hadoop/hadoop-0.20.205.0.patch	\
-  hadoop/hadoop-0.20.205.0.tar.gz				\
-  hadoop/hadoop-0.20.205.0_conf_hadoop-env.sh.patch		\
-  hadoop/hadoop-0.20.205.0_conf_mapred-site.xml.patch
-
-# Defines a target which runs the Hadoop tutorial to make sure
-# everything works. At some point we might want to do this
-# automagically (i.e., as part of 'make check'). Note that we set the
-# environment variable TMOUT to 1 so that each prompt in the tutorial
-# will return after 1 second so no interaction from the user is
-# required.
-hadoop: all
-	@if test "$(top_srcdir)" != "$(top_builddir)"; then \
-	  rm -rf hadoop; \
-	  cp -rpf $(srcdir)/hadoop hadoop; \
-	fi
-	@TMOUT=1 JAVA_HOME=$(JAVA_HOME) ./hadoop/TUTORIAL.sh
-
-# TODO(benh): Cleanup (i.e., via 'clean-local') for hadoop target.
-
-PHONY_TARGETS += hadoop
-
-
 if HAS_JAVA
 maven-install:
 	@cd src && $(MAKE) $(AM_MAKEFLAGS) maven-install
@@ -71,47 +47,4 @@ PHONY_TARGETS += maven-install
 endif
 
 
-# EC2 support.
-EXTRA_DIST += ec2/mesos-ec2 ec2/mesos_ec2.py
-
-EXTRA_DIST +=									\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/core-site.xml			\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/hadoop-env.sh			\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/hdfs-site.xml			\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/mapred-site.xml			\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/masters				\
-  ec2/deploy.amazon64/root/ephemeral-hdfs/conf/slaves				\
-  ec2/deploy.amazon64/root/mesos-ec2/cluster-url				\
-  ec2/deploy.amazon64/root/mesos-ec2/copy-dir					\
-  ec2/deploy.amazon64/root/mesos-ec2/create-swap				\
-  ec2/deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/core-site.xml	\
-  ec2/deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh	\
-  ec2/deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml	\
-  ec2/deploy.amazon64/root/mesos-ec2/haproxy+apache/haproxy.config.template	\
-  ec2/deploy.amazon64/root/mesos-ec2/hypertable/Capfile				\
-  ec2/deploy.amazon64/root/mesos-ec2/hypertable/hypertable.cfg			\
-  ec2/deploy.amazon64/root/mesos-ec2/masters					\
-  ec2/deploy.amazon64/root/mesos-ec2/mesos-daemon				\
-  ec2/deploy.amazon64/root/mesos-ec2/redeploy-mesos				\
-  ec2/deploy.amazon64/root/mesos-ec2/setup					\
-  ec2/deploy.amazon64/root/mesos-ec2/setup-slave				\
-  ec2/deploy.amazon64/root/mesos-ec2/setup-torque				\
-  ec2/deploy.amazon64/root/mesos-ec2/slaves					\
-  ec2/deploy.amazon64/root/mesos-ec2/ssh-no-keychecking				\
-  ec2/deploy.amazon64/root/mesos-ec2/start-hypertable				\
-  ec2/deploy.amazon64/root/mesos-ec2/start-mesos				\
-  ec2/deploy.amazon64/root/mesos-ec2/stop-hypertable				\
-  ec2/deploy.amazon64/root/mesos-ec2/stop-mesos					\
-  ec2/deploy.amazon64/root/mesos-ec2/zoo					\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/core-site.xml			\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/hadoop-env.sh			\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/hdfs-site.xml			\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/mapred-site.xml			\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/masters				\
-  ec2/deploy.amazon64/root/persistent-hdfs/conf/slaves				\
-  ec2/deploy.amazon64/root/spark/conf/spark-env.sh
-
-EXTRA_DIST += ec2/deploy.generic/root/mesos-ec2/ec2-variables.sh
-
-
 .PHONY: $(PHONY_TARGETS)

Modified: incubator/mesos/trunk/README
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/README?rev=1332469&r1=1332468&r2=1332469&view=diff
==============================================================================
--- incubator/mesos/trunk/README (original)
+++ incubator/mesos/trunk/README Mon Apr 30 23:28:43 2012
@@ -112,6 +112,11 @@ Hadoop
 Included in the distribution is a runnable tutorial on using Hadoop on
 Mesos (./hadoop/TUTORIAL.sh). Try it out!
 
+You can also "build" a self-contained distribution of Hadoop with the
+necessary Mesos components by doing 'make hadoop-0.20.205.0' or 'make
+hadoop-0.20.2-cdh3u3' from within [build]/hadoop (this uses the
+tutorial mentioned above).
+
 
 Installing
 ==========

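The README addition above boils down to the following session from a
configured build tree (illustrative; [build] stands for wherever you
ran configure):

  $ cd [build]/hadoop
  $ make hadoop-0.20.205.0      # or: make hadoop-0.20.2-cdh3u3

Each target runs TUTORIAL.sh non-interactively (see the new
hadoop/Makefile.am below) and leaves a patched, built Hadoop tree in
the build directory.
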
Modified: incubator/mesos/trunk/configure.ac
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/configure.ac?rev=1332469&r1=1332468&r2=1332469&view=diff
==============================================================================
--- incubator/mesos/trunk/configure.ac (original)
+++ incubator/mesos/trunk/configure.ac Mon Apr 30 23:28:43 2012
@@ -73,6 +73,8 @@ AC_CONFIG_COMMANDS_POST([ac_configure_ar
 AC_CONFIG_SUBDIRS([third_party/libprocess])
 
 AC_CONFIG_FILES([Makefile])
+AC_CONFIG_FILES([ec2/Makefile])
+AC_CONFIG_FILES([hadoop/Makefile])
 AC_CONFIG_FILES([src/Makefile])
 AC_CONFIG_FILES([third_party/Makefile])
 

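Together with the SUBDIRS change in the top-level Makefile.am, this is
the standard automake wiring for the two new directories; minimally:

  # configure.ac: generate each Makefile from its new Makefile.am
  AC_CONFIG_FILES([ec2/Makefile])
  AC_CONFIG_FILES([hadoop/Makefile])

  # Makefile.am: recurse into the new directories during 'make'
  SUBDIRS = . third_party src ec2 hadoop
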
Added: incubator/mesos/trunk/ec2/Makefile.am
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/Makefile.am?rev=1332469&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/Makefile.am (added)
+++ incubator/mesos/trunk/ec2/Makefile.am Mon Apr 30 23:28:43 2012
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+# EC2 support.
+EXTRA_DIST = mesos-ec2 mesos_ec2.py
+
+EXTRA_DIST += deploy.amazon64/root/ephemeral-hdfs/conf/core-site.xml	\
+  deploy.amazon64/root/ephemeral-hdfs/conf/hadoop-env.sh		\
+  deploy.amazon64/root/ephemeral-hdfs/conf/hdfs-site.xml		\
+  deploy.amazon64/root/ephemeral-hdfs/conf/mapred-site.xml		\
+  deploy.amazon64/root/ephemeral-hdfs/conf/masters			\
+  deploy.amazon64/root/ephemeral-hdfs/conf/slaves			\
+  deploy.amazon64/root/mesos-ec2/cluster-url				\
+  deploy.amazon64/root/mesos-ec2/copy-dir				\
+  deploy.amazon64/root/mesos-ec2/create-swap				\
+  deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/core-site.xml	\
+  deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh	\
+  deploy.amazon64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml	\
+  deploy.amazon64/root/mesos-ec2/haproxy+apache/haproxy.config.template	\
+  deploy.amazon64/root/mesos-ec2/hypertable/Capfile			\
+  deploy.amazon64/root/mesos-ec2/hypertable/hypertable.cfg		\
+  deploy.amazon64/root/mesos-ec2/masters				\
+  deploy.amazon64/root/mesos-ec2/mesos-daemon				\
+  deploy.amazon64/root/mesos-ec2/redeploy-mesos				\
+  deploy.amazon64/root/mesos-ec2/setup					\
+  deploy.amazon64/root/mesos-ec2/setup-slave				\
+  deploy.amazon64/root/mesos-ec2/setup-torque				\
+  deploy.amazon64/root/mesos-ec2/slaves					\
+  deploy.amazon64/root/mesos-ec2/ssh-no-keychecking			\
+  deploy.amazon64/root/mesos-ec2/start-hypertable			\
+  deploy.amazon64/root/mesos-ec2/start-mesos				\
+  deploy.amazon64/root/mesos-ec2/stop-hypertable			\
+  deploy.amazon64/root/mesos-ec2/stop-mesos				\
+  deploy.amazon64/root/mesos-ec2/zoo					\
+  deploy.amazon64/root/persistent-hdfs/conf/core-site.xml		\
+  deploy.amazon64/root/persistent-hdfs/conf/hadoop-env.sh		\
+  deploy.amazon64/root/persistent-hdfs/conf/hdfs-site.xml		\
+  deploy.amazon64/root/persistent-hdfs/conf/mapred-site.xml		\
+  deploy.amazon64/root/persistent-hdfs/conf/masters			\
+  deploy.amazon64/root/persistent-hdfs/conf/slaves			\
+  deploy.amazon64/root/spark/conf/spark-env.sh
+
+EXTRA_DIST += deploy.generic/root/mesos-ec2/ec2-variables.sh

Added: incubator/mesos/trunk/hadoop/Makefile.am
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/hadoop/Makefile.am?rev=1332469&view=auto
==============================================================================
--- incubator/mesos/trunk/hadoop/Makefile.am (added)
+++ incubator/mesos/trunk/hadoop/Makefile.am Mon Apr 30 23:28:43 2012
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+EXTRA_DIST = TUTORIAL.sh hadoop-0.20.2-cdh3u3.patch			\
+  hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch				\
+  hadoop-0.20.2-cdh3u3_mesos.patch hadoop-0.20.205.0.patch		\
+  hadoop-0.20.205.0_hadoop-env.sh.patch hadoop-0.20.205.0_mesos.patch	\
+  mapred-site.xml.patch mesos-executor mesos/build.xml			\
+  mesos/ivy/libraries.properties mesos/ivy.xml				\
+  mesos/src/java/org/apache/hadoop/mapred/FrameworkExecutor.java	\
+  mesos/src/java/org/apache/hadoop/mapred/FrameworkScheduler.java	\
+  mesos/src/java/org/apache/hadoop/mapred/HadoopFrameworkMessage.java	\
+  mesos/src/java/org/apache/hadoop/mapred/MesosScheduler.java		\
+  mesos/src/java/org/apache/hadoop/mapred/MesosTaskTrackerInstrumentation.java
+
+# Defines some targets to run the Hadoop tutorial using a specified
+# distribution. At some point we might want to do this automagically
+# (i.e., as part of 'make check'). Note that we set the environment
+# variable TMOUT to 1 so that each prompt in the tutorial will return
+# after 1 second so no interaction from the user is required.
+hadoop-0.20.205.0:
+	if test "$(top_srcdir)" != "$(top_builddir)"; then \
+          cp -p $(srcdir)/TUTORIAL.sh .; \
+          cp -p $(srcdir)/hadoop-0.20.205.0.patch .; \
+          cp -p $(srcdir)/hadoop-0.20.205.0_hadoop-env.sh.patch .; \
+          cp -p $(srcdir)/hadoop-0.20.205.0_mesos.patch .; \
+          cp -p $(srcdir)/mapred-site.xml.patch .; \
+          cp -rp $(srcdir)/mesos .; \
+          cp -p $(srcdir)/mesos-executor .; \
+        fi
+	-rm -rf hadoop-0.20.205.0
+	@TMOUT=1 JAVA_HOME=$(JAVA_HOME) ./TUTORIAL.sh
+
+hadoop-0.20.2-cdh3u3:
+	if test "$(top_srcdir)" != "$(top_builddir)"; then \
+          cp -p $(srcdir)/TUTORIAL.sh .; \
+          cp -p $(srcdir)/hadoop-0.20.2-cdh3u3.patch .; \
+          cp -p $(srcdir)/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch .; \
+          cp -p $(srcdir)/hadoop-0.20.2-cdh3u3_mesos.patch .; \
+          cp -p $(srcdir)/mapred-site.xml.patch .; \
+          cp -rp $(srcdir)/mesos .; \
+          cp -p $(srcdir)/mesos-executor .; \
+        fi
+	-rm -rf hadoop-0.20.2-cdh3u3
+	@TMOUT=1 JAVA_HOME=$(JAVA_HOME) ./TUTORIAL.sh 0.20.2-cdh3u3
+
+
+clean-local:
+	-rm -rf hadoop-0.20.2-cdh3u3
+	-rm -f hadoop-0.20.2-cdh3u3.tar.gz
+	-rm -rf hadoop-0.20.205.0
+	-rm -f hadoop-0.20.205.0.tar.gz
+	-rm -f TUTORIAL.sh
+	-rm -f hadoop-0.20.2-cdh3u3.patch
+	-rm -f hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch
+	-rm -f hadoop-0.20.2-cdh3u3_mesos.patch
+	-rm -f hadoop-0.20.205.0.patch
+	-rm -f hadoop-0.20.205.0_hadoop-env.sh.patch
+	-rm -f hadoop-0.20.205.0_mesos.patch
+	-rm -f mapred-site.xml.patch
+	-rm -rf mesos
+	-rm -f mesos-executor
+
+
+.PHONY: hadoop-0.20.205.0 hadoop-0.20.2-cdh3u3

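The TMOUT=1 trick described in the comment above works because bash's
read builtin treats TMOUT as a default timeout, so each of the
tutorial's "Hit enter to continue" prompts returns on its own after a
second. In isolation (a standalone sketch, not part of the commit):

  $ TMOUT=1 bash -c 'read -e -p "Hit enter to continue. "; echo resumed'
  Hit enter to continue.        <-- returns by itself after ~1 second
  resumed
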
Modified: incubator/mesos/trunk/hadoop/TUTORIAL.sh
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/hadoop/TUTORIAL.sh?rev=1332469&r1=1332468&r2=1332469&view=diff
==============================================================================
--- incubator/mesos/trunk/hadoop/TUTORIAL.sh (original)
+++ incubator/mesos/trunk/hadoop/TUTORIAL.sh Mon Apr 30 23:28:43 2012
@@ -1,7 +1,15 @@
 #!/bin/bash
 
-# The Hadoop version for this tutorial.
-hadoop=hadoop-0.20.205.0
+# Determine the Hadoop distribution to use.
+if test -z "${1}"; then
+    distribution="0.20.205.0"
+    url="http://apache.cs.utah.edu/hadoop/common/hadoop-0.20.205.0"
+elif test "${1}" = "0.20.2-cdh3u3"; then
+    distribution="0.20.2-cdh3u3"
+    url="http://archive.cloudera.com/cdh/3"
+fi
+
+hadoop="hadoop-${distribution}"
 
 # The potentially running JobTracker that we need to kill.
 jobtracker_pid=
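
Note that the argument handling above silently leaves 'distribution'
and 'url' unset when ${1} is given but unrecognized. A more defensive
variant -- an illustrative sketch, not what the commit does -- would
reject unknown arguments:

  if test -z "${1}"; then
      distribution="0.20.205.0"
      url="http://apache.cs.utah.edu/hadoop/common/hadoop-0.20.205.0"
  elif test "${1}" = "0.20.2-cdh3u3"; then
      distribution="0.20.2-cdh3u3"
      url="http://archive.cloudera.com/cdh/3"
  else
      echo "Unsupported Hadoop distribution: ${1}" >&2
      exit 1
  fi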
@@ -31,20 +39,22 @@ cd `dirname ${0}`
 # Include wonderful colors for our tutorial!
 test -f ../support/colors.sh && . ../support/colors.sh
 
-# Make sure we have all the necessary files we need.
-files="TUTORIAL.sh \
-  hadoop-0.20.205.0.patch \
-  hadoop-0.20.205.0.tar.gz \
-  hadoop-0.20.205.0_conf_hadoop-env.sh.patch \
-  hadoop-0.20.205.0_conf_mapred-site.xml.patch"
+# Make sure we have all the necessary files/directories we need.
+resources="TUTORIAL.sh \
+  ${hadoop}.patch \
+  ${hadoop}_hadoop-env.sh.patch \
+  ${hadoop}_mesos.patch \
+  mapred-site.xml.patch \
+  mesos \
+  mesos-executor"
 
-for file in `echo ${files}`; do
-    if test ! -f ${file}; then
+for resource in `echo ${resources}`; do
+    if test ! -e ${resource}; then
         cat <<__EOF__
 
-${RED}We seem to be missing ${file} from the directory containing this
-tutorial and we can't continue without that file. If you haven't made
-any modifications to this directory, please report this to:
+${RED}We seem to be missing ${resource} from the directory containing
+this tutorial and we can't continue without it. If you haven't
+made any modifications to this directory, please report this to:
 
   mesos-dev@incubator.apache.org
 
@@ -81,12 +91,33 @@ __EOF__
 fi
 
 
+# Download Hadoop.
+if test ! -e ${hadoop}.tar.gz; then
+    cat <<__EOF__
+
+We'll try and grab ${hadoop} for you now via:
+
+  $ wget ${url}/${hadoop}.tar.gz
+
+__EOF__
+    read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
+    echo
+    wget ${url}/${hadoop}.tar.gz || fail "wget ${url}/${hadoop}.tar.gz"
+else
+    cat <<__EOF__
+
+${RED}It looks like you've already downloaded ${hadoop}.tar.gz, so
+we'll skip that step.${NORMAL}
+
+__EOF__
+fi
+
+
 # Extract the archive.
 if test ! -d ${hadoop}; then
     cat <<__EOF__
 
-We've included the 0.20.205.0 version of Hadoop in this directory
-(${hadoop}.tar.gz). Start by extracting it:
+Let's start by extracting ${hadoop}.tar.gz:
 
   $ tar zxvf ${hadoop}.tar.gz
 
@@ -104,20 +135,39 @@ __EOF__
 fi
 
 
-# Apply the patch.
+# Change into Hadoop directory.
+cat <<__EOF__
+
+Okay, now let's change into the ${hadoop} directory in order to apply
+some patches, copy in the Mesos specific code, and build everything.
+
+  $ cd ${hadoop}
+
+__EOF__
+
+read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
+echo
+
+cd ${hadoop} || fail "cd ${hadoop}"
+
+
+# Apply the Hadoop patch.
 cat <<__EOF__
 
 To run Hadoop on Mesos we need to apply a rather minor patch. The
-patch makes a small number of modifications in Hadoop, and adds some
-new code at src/contrib/mesos. (Note that the changes to Hadoop have
-been committed in revisions r1033804 and r987589 so at some point we
-won't need to apply any patch at all.) We'll apply the patch with:
+patch makes a small number of modifications in Hadoop. (Note that the
+changes to Hadoop have been committed in revisions r1033804 and
+r987589 so at some point we won't need to apply any patch at all.)
+We'll apply the patch with:
 
-  $ patch -p2 <${hadoop}.patch
+  $ patch -p1 <../${hadoop}.patch
 
 __EOF__
 
-if test -d ${hadoop}/src/contrib/mesos; then
+# Check and see if the patch has already been applied.
+grep extraData src/mapred/org/apache/hadoop/mapred/Task.java >/dev/null
+
+if test ${?} == "0"; then
     cat <<__EOF__
 
 ${RED}It looks like you've already applied the patch, so we'll skip
@@ -127,37 +177,44 @@ __EOF__
 else
     read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
     echo
-    patch -p2 <${hadoop}.patch || fail "patch -p2 <${hadoop}.patch"
+    patch -p1 <../${hadoop}.patch || fail "patch -p1 <../${hadoop}.patch"
 fi
 
-if test ! -x ${hadoop}/bin/mesos-executor; then
-    cat <<__EOF__
 
-We'll also need to make one of the new files executable via:
+# Copy over the Mesos contrib component (and mesos-executor) and apply
+# the patch to build the contrib.
+cat <<__EOF__
+
+Now we'll copy over the Mesos contrib components. In addition, we'll
+need to edit ivy/libraries.properties and src/contrib/build.xml to
+hook the Mesos contrib component into the build. We've included a
+patch to do that for you:
 
-  $ chmod +x ${hadoop}/bin/mesos-executor
+  $ cp -r ../mesos src/contrib
+  $ cp -p ../mesos-executor bin
+  $ patch -p1 <../${hadoop}_mesos.patch
 
 __EOF__
-    read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
-    echo
-    chmod +x ${hadoop}/bin/mesos-executor || \
-        fail "chmod +x ${hadoop}/bin/mesos-executor"
-fi
 
+cp -r ../mesos src/contrib || fail "cp -r ../mesos src/contrib"
+cp -p ../mesos-executor bin || fail "cp -p ../mesos-executor bin"
 
-# Change into Hadoop directory.
-cat <<__EOF__
+# Check and see if the patch has already been applied.
+grep mesos src/contrib/build.xml >/dev/null
 
-Okay, now let's change into the directory in order to build Hadoop.
+if test ${?} == "0"; then
+    cat <<__EOF__
 
-  $ cd ${hadoop}
+${RED}It looks like you've already applied the patch, so we'll skip
+applying it now.${NORMAL}
 
 __EOF__
-
-read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
-echo
-
-cd ${hadoop} || fail "cd ${hadoop}"
+else
+    read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
+    echo
+    patch -p1 <../${hadoop}_mesos.patch || \
+        fail "patch -p1 <../${hadoop}_mesos.patch"
+fi
 
 
 # Determine MESOS_BUILD_DIR.
@@ -205,27 +262,104 @@ Using ${BRIGHT}${MESOS_BUILD_DIR}${NORMA
 
 __EOF__
 
+LIBRARY=${MESOS_BUILD_DIR}/src/.libs/libmesos.so
+
+if test ! -f ${LIBRARY}; then
+    LIBRARY=${MESOS_BUILD_DIR}/src/.libs/libmesos.dylib
+fi
+
+if test ! -f ${LIBRARY}; then
+    cat <<__EOF__
+
+${RED}We seem to be having trouble locating the native library (it's
+not at ${MESOS_BUILD_DIR}/src/.libs/libmesos.so or
+${MESOS_BUILD_DIR}/src/.libs/libmesos.dylib).
+
+Have you already built Mesos? If you have, please report this to:
+
+  mesos-dev@incubator.apache.org
+
+(Remember to include as much debug information as possible.)${NORMAL}
+
+__EOF__
+    exit 1
+fi
+
+# Determine the "platform name" to copy the native library.
+cat <<__EOF__ >PlatformName.java
+public class PlatformName {
+  public static void main(String[] args) {
+    System.out.println(System.getProperty("os.name") + "-" +
+      System.getProperty("os.arch") + "-" +
+      System.getProperty("sun.arch.data.model"));
+    System.exit(0);
+  }
+}
+__EOF__
+
+${JAVA_HOME}/bin/javac PlatformName.java || \
+  fail "${JAVA_HOME}/bin/javac PlatformName.java"
+
+PLATFORM=`${JAVA_HOME}/bin/java -Xmx32m PlatformName | sed -e "s/ /_/g"`
+
+rm PlatformName.*
+
+
+# Copy over libraries.
+MESOS_JAR=`echo ${MESOS_BUILD_DIR}/src/mesos-*.jar`
+PROTOBUF_JAR=`echo ${MESOS_BUILD_DIR}/protobuf-*.jar`
+
+cat <<__EOF__
+
+Now we'll copy over the necessary libraries we need from the build
+directory.
+
+  $ cp ${PROTOBUF_JAR} lib
+  $ cp ${MESOS_JAR} lib
+  $ mkdir -p lib/native/${PLATFORM}
+  $ cp ${LIBRARY} lib/native/${PLATFORM}
+
+__EOF__
+
+cp ${PROTOBUF_JAR} lib || fail "cp ${PROTOBUF_JAR} lib"
+cp ${MESOS_JAR} lib || fail "cp ${MESOS_JAR} lib"
+mkdir -p lib/native/${PLATFORM} || fail "mkdir -p lib/native/${PLATFORM}"
+cp ${LIBRARY} lib/native/${PLATFORM} || \
+    fail "cp ${LIBRARY} lib/native/${PLATFORM}"
+
+if test ${distribution} = "0.20.205.0"; then
+    cat <<__EOF__
+
+The Apache distribution requires that we also copy some libraries to
+multiple places. :/
 
-VERSION=`echo @PACKAGE_VERSION@ | ${MESOS_BUILD_DIR}/config.status --file=-:-`
+  $ cp ${PROTOBUF_JAR} share/hadoop/lib
+  $ cp ${MESOS_JAR} share/hadoop/lib
+  $ cp ${LIBRARY} lib
+
+__EOF__
+
+    cp ${PROTOBUF_JAR} share/hadoop/lib || \
+        fail "cp ${PROTOBUF_JAR} share/hadoop/lib"
+    cp ${MESOS_JAR} share/hadoop/lib || \
+        fail "cp ${MESOS_JAR} share/hadoop/lib"
+    cp ${LIBRARY} lib || fail "cp ${LIBRARY} lib"
+fi
 
 
 # Build with ant.
 cat <<__EOF__
 
-Okay, let's try building Hadoop now! We need to let the build system
-know where the Mesos JAR is located by using the MESOS_JAR environment
-variable (i.e., MESOS_JAR=\${MESOS_BUILD_DIR}/src/mesos-${VERSION}.jar). We
-can put it on the command line with 'ant like this:
+Okay, let's try building Hadoop and the Mesos contrib classes:
 
-  $ MESOS_JAR=${MESOS_BUILD_DIR}/src/mesos-${VERSION}.jar ant
+  $ ant
 
 __EOF__
 
 read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
 echo
 
-MESOS_JAR=${MESOS_BUILD_DIR}/src/mesos-${VERSION}.jar ant || \
-    fail "MESOS_JAR=${MESOS_BUILD_DIR}/src/mesos-${VERSION}.jar ant"
+ant || fail "ant"
 
 
 # Apply conf/mapred-site.xml patch.
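
For reference, the PlatformName helper above follows the same naming
scheme Hadoop uses for its native library directories: os.name,
os.arch, and the JVM data model joined by dashes, with spaces replaced
by underscores. Typical values (illustrative):

  $ ${JAVA_HOME}/bin/java -Xmx32m PlatformName
  Linux-amd64-64                (64-bit JVM on Linux)
  Mac_OS_X-x86_64-64            (64-bit JVM on Mac OS X)

so the native library lands at, e.g., lib/native/Linux-amd64-64/libmesos.so.
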
@@ -253,22 +387,22 @@ The 'mapred.jobtracker.taskScheduler' pr
 If you've already got a Mesos master running you can use that for
 'mapred.mesos.master', but for this tutorial we'll just use 'local' in
 order to bring up a Mesos "cluster" within the process. To connect to
-a remote master simply use the Mesos URL used to connect the slave to
-the master (e.g., mesos://master@localhost:5050).
+a remote master simply use the URL used to connect the slave to the
+master (e.g., localhost:5050).
 
 We've got a prepared patch for conf/mapred-site.xml that makes the
 changes necessary to get everything running. We can apply that patch
 like so:
 
-  $ patch -p3 <../${hadoop}_conf_mapred-site.xml.patch
+  $ patch -p1 <../mapred-site.xml.patch
 
 __EOF__
 
 read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
 echo
 
-patch --dry-run --silent --force -p3 \
-    <../${hadoop}_conf_mapred-site.xml.patch 1>/dev/null 2>&1
+patch --dry-run --silent --force -p1 \
+    <../mapred-site.xml.patch 1>/dev/null 2>&1
 
 if test ${?} == "1"; then
     cat <<__EOF__
@@ -287,40 +421,30 @@ read -e -p "${BRIGHT}Patch conf/mapred-s
 echo
 test -z ${REPLY} && REPLY=${DEFAULT}
 if test ${REPLY} == "Y" -o ${REPLY} == "y"; then
-    patch -p3 <../${hadoop}_conf_mapred-site.xml.patch || \
-        fail "patch -p3 <../${hadoop}_conf_mapred-site.xml.patch"
+    patch -p1 <../mapred-site.xml.patch || \
+        fail "patch -p1 <../mapred-site.xml.patch"
 fi
 
+
+
 # Apply conf/hadoop-env.sh patch.
 cat <<__EOF__
 
-Now in order to actually run Hadoop we need to set up our environment
-appropriately for Hadoop. We can do this in conf/hadoop-env.sh This
-includes:
-
-  (1) Setting JAVA_HOME (unnecessary if JAVA_HOME is set in your environment).
-  (2) Adding the Mesos contrib class files to HADOOP_CLASSPATH.
-  (3) Adding mesos-${VERSION}.jar to the HADOOP_CLASSPATH.
-  (4) Adding protobuf-2.4.1.jar to the HADOOP_CLASSPATH.
-  (5) Setting MESOS_NATIVE_LIBRARY to point to the native library.
-
-We've got a prepared patch for conf/hadoop-env.sh that makes the
-necessary changes. We can apply that patch like so:
-
-  $ patch -p3 <../${hadoop}_conf_hadoop-env.sh.patch
-
-(Note that this patch assumes MESOS_BUILD_DIR is '../..' and you'll
-need to specify that on the command line when you try and run the
-JobTracker if that's not the case ... don't worry, we'll remind you
-again later.)
+Most users will need to set JAVA_HOME in conf/hadoop-env.sh, but we'll
+also need to set MESOS_NATIVE_LIBRARY and update the HADOOP_CLASSPATH
+to include the Mesos contrib classfiles. We've prepared a patch for
+conf/hadoop-env.sh that makes the necessary changes. We can apply that
+patch like so:
+
+  $ patch -p1 <../${hadoop}_hadoop-env.sh.patch
 
 __EOF__
 
 read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
 echo
 
-patch --dry-run --silent --force -p3 \
-    <../${hadoop}_conf_hadoop-env.sh.patch 1>/dev/null 2>&1
+patch --dry-run --silent --force -p1 \
+    <../${hadoop}_hadoop-env.sh.patch 1>/dev/null 2>&1
 
 if test ${?} == "1"; then
     cat <<__EOF__
@@ -339,8 +463,8 @@ read -e -p "${BRIGHT}Patch conf/hadoop-e
 echo
 test -z ${REPLY} && REPLY=${DEFAULT}
 if test ${REPLY} == "Y" -o ${REPLY} == "y"; then
-    patch -p3 <../${hadoop}_conf_hadoop-env.sh.patch || \
-        fail "patch -p3 <../${hadoop}_conf_hadoop-env.sh.patch"
+    patch -p1 <../${hadoop}_hadoop-env.sh.patch || \
+        fail "patch -p1 <../${hadoop}_hadoop-env.sh.patch"
 fi
 
 
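
Concretely, the two conf patches leave the configuration looking
roughly like this (a sketch based on the discussion above; the
authoritative content is mapred-site.xml.patch, and the scheduler
class is inferred from the MesosScheduler class this commit adds):

  <!-- conf/mapred-site.xml -->
  <property>
    <name>mapred.jobtracker.taskScheduler</name>
    <value>org.apache.hadoop.mapred.MesosScheduler</value>
  </property>
  <property>
    <name>mapred.mesos.master</name>
    <value>local</value>
  </property>
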
@@ -351,20 +475,12 @@ Let's go ahead and try and start the Job
 
   $ ./bin/hadoop jobtracker
 
-Note that if you applied our conf/hadoop-env.sh patch we assume that
-MESOS_BUILD_DIR is located at '../..'. If this isn't the case (i.e.,
-you specified a different build directory than the default during this
-tutorial) than you'll need to set that variable either directly in
-conf/hadoop-env.sh or on the command line via:
-
-  $ MESOS_BUILD_DIR=/path/to/mesos/build ./bin/hadoop jobtracker
-
 __EOF__
 
 read -e -p "${BRIGHT}Hit enter to continue.${NORMAL} "
 echo
 
-MESOS_BUILD_DIR=${MESOS_BUILD_DIR} ./bin/hadoop jobtracker 1>/dev/null 2>&1 &
+./bin/hadoop jobtracker 1>/dev/null 2>&1 &
 
 jobtracker_pid=${!}
 
@@ -384,7 +500,7 @@ cat <<__EOF__
 
 Alright, now let's run the "wordcount" example via:
 
-  $ ./bin/hadoop jar hadoop-examples-0.20.205.0.jar wordcount \
+  $ ./bin/hadoop jar hadoop-examples-${distribution}.jar wordcount \
   src/contrib/mesos/src/java/org/apache/hadoop/mapred out
 
 __EOF__
@@ -394,7 +510,7 @@ echo
 
 rm -rf out # TODO(benh): Ask about removing this first.
 
-./bin/hadoop jar hadoop-examples-0.20.205.0.jar wordcount \
+./bin/hadoop jar hadoop-examples-${distribution}.jar wordcount \
     src/contrib/mesos/src/java/org/apache/hadoop/mapred out
 
 
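
With the 'local' setup from the patched conf, the word counts should
land in the local out/ directory; a quick way to eyeball the result
(illustrative; the exact part-file name may vary):

  $ head out/part-00000
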
@@ -403,6 +519,37 @@ if test ${?} == "0"; then
 
 ${GREEN}Success!${NORMAL} We'll kill the JobTracker and exit.
 
+Summary:
+
+  $ wget ${url}/${hadoop}.tar.gz
+  $ tar zxvf ${hadoop}.tar.gz
+  $ cd ${hadoop}
+  $ patch -p1 <../${hadoop}.patch
+  $ cp -r ../mesos src/contrib
+  $ cp -p ../mesos-executor bin
+  $ patch -p1 <../${hadoop}_mesos.patch
+  $ cp ${PROTOBUF_JAR} lib
+  $ cp ${MESOS_JAR} lib
+  $ mkdir -p lib/native/${PLATFORM}
+  $ cp ${LIBRARY} lib/native/${PLATFORM}
+__EOF__
+
+if test ${distribution} = "0.20.205.0"; then
+    cat <<__EOF__
+  $ cp ${PROTOBUF_JAR} share/hadoop/lib
+  $ cp ${MESOS_JAR} share/hadoop/lib
+  $ cp ${LIBRARY} lib
+__EOF__
+fi
+
+cat <<__EOF__
+  $ ant
+  $ patch -p1 <../mapred-site.xml.patch
+  $ patch -p1 <../${hadoop}_hadoop-env.sh.patch
+
+Remember you'll need to change ${hadoop}/conf/mapred-site.xml to
+connect to a Mesos cluster (the patch just uses 'local').
+
 We hope you found this helpful!
 
 __EOF__

Added: incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3.patch
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3.patch?rev=1332469&view=auto
==============================================================================
--- incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3.patch (added)
+++ incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3.patch Mon Apr 30 23:28:43 2012
@@ -0,0 +1,271 @@
+diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
+index 14fca03..685afe4 100644
+--- a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
++++ b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
+@@ -3487,4 +3487,8 @@ public class JobInProgress {
+     LOG.info("jobToken generated and stored with users keys in "
+         + keysFile.toUri().getPath());
+   }
++
++  int getMaxCacheLevel() {
++    return maxLevel;
++  }
+ }
+diff --git a/src/mapred/org/apache/hadoop/mapred/Task.java b/src/mapred/org/apache/hadoop/mapred/Task.java
+index 2d8a2ef..ee57c71 100644
+--- a/src/mapred/org/apache/hadoop/mapred/Task.java
++++ b/src/mapred/org/apache/hadoop/mapred/Task.java
+@@ -161,6 +161,7 @@ abstract public class Task implements Writable, Configurable {
+   private int numSlotsRequired;
+   protected SecretKey tokenSecret;
+   protected JvmContext jvmContext;
++  protected String extraData = "";
+ 
+   ////////////////////////////////////////////
+   // Constructors
+@@ -423,6 +424,7 @@ abstract public class Task implements Writable, Configurable {
+     out.writeBoolean(writeSkipRecs);
+     out.writeBoolean(taskCleanup); 
+     Text.writeString(out, user);
++    Text.writeString(out, extraData);
+   }
+   
+   public void readFields(DataInput in) throws IOException {
+@@ -447,6 +449,7 @@ abstract public class Task implements Writable, Configurable {
+       setPhase(TaskStatus.Phase.CLEANUP);
+     }
+     user = Text.readString(in);
++    extraData = Text.readString(in);
+   }
+ 
+   @Override
+diff --git a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java
+index 8925eba..d68a8bc 100644
+--- a/src/mapred/org/apache/hadoop/mapred/TaskRunner.java
++++ b/src/mapred/org/apache/hadoop/mapred/TaskRunner.java
+@@ -207,9 +207,14 @@ abstract class TaskRunner extends Thread {
+       File[] logFiles = prepareLogFiles(taskid, t.isTaskCleanupTask());
+       File stdout = logFiles[0];
+       File stderr = logFiles[1];
+-      tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout,
+-                 stderr);
+-      
++      //tracker.getTaskTrackerInstrumentation().reportTaskLaunch(taskid, stdout,
++      //           stderr);
++      List<TaskTrackerInstrumentation> ttInstrumentations =
++       tracker.getTaskTrackerInstrumentations();
++      for (TaskTrackerInstrumentation inst: ttInstrumentations) {
++       inst.reportTaskLaunch(taskid, stdout, stderr);
++      }
++
+       Map<String, String> env = new HashMap<String, String>();
+       errorInfo = getVMEnvironment(errorInfo, workDir, conf, env, taskid,
+                                    logSize);
+@@ -220,11 +225,18 @@ abstract class TaskRunner extends Thread {
+       setupCmds.add(setup);
+       
+       launchJvmAndWait(setupCmds, vargs, stdout, stderr, logSize, workDir);
+-      tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
++      //tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
++      for (TaskTrackerInstrumentation inst: ttInstrumentations) {
++       inst.reportTaskEnd(t.getTaskID());
++      }
++
+       if (exitCodeSet) {
+         if (!killed && exitCode != 0) {
+           if (exitCode == 65) {
+-            tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
++            //tracker.getTaskTrackerInstrumentation().taskFailedPing(t.getTaskID());
++           for (TaskTrackerInstrumentation inst: ttInstrumentations) {
++             inst.taskFailedPing(t.getTaskID());
++           }
+           }
+           throw new IOException("Task process exit with nonzero status of " +
+               exitCode + ".");
+diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
+index a5ba0f5..cc8a606 100644
+--- a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
++++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
+@@ -507,11 +507,18 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+   
+   
+     
+-  private TaskTrackerInstrumentation myInstrumentation = null;
++  //private TaskTrackerInstrumentation myInstrumentation = null;
++  private List<TaskTrackerInstrumentation> instrumentations =
++    new ArrayList<TaskTrackerInstrumentation>();
+ 
+-  public TaskTrackerInstrumentation getTaskTrackerInstrumentation() {
+-    return myInstrumentation;
++
++  //public TaskTrackerInstrumentation getTaskTrackerInstrumentation() {
++  //  return myInstrumentation;
++  //}
++  public List<TaskTrackerInstrumentation> getTaskTrackerInstrumentations() {
++    return instrumentations;
+   }
++
+   
+   /**
+    * A list of tips that should be cleaned up.
+@@ -879,18 +886,30 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+     //tweak the probe sample size (make it a function of numCopiers)
+     probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);
+     
++    Class<?>[] instrumentationClasses = getInstrumentationClasses(fConf);
+     try {
+-      Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf);
+-      java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c =
+-        metricsInst.getConstructor(new Class[] {TaskTracker.class} );
+-      this.myInstrumentation = c.newInstance(this);
++      //Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf);
++      //java.lang.reflect.Constructor<? extends TaskTrackerInstrumentation> c =
++      //  metricsInst.getConstructor(new Class[] {TaskTracker.class} );
++      //this.myInstrumentation = c.newInstance(this);
++      for (Class<?> cls: instrumentationClasses) {
++       java.lang.reflect.Constructor<?> c =
++         cls.getConstructor(new Class[] {TaskTracker.class} );
++       TaskTrackerInstrumentation inst =
++         (TaskTrackerInstrumentation) c.newInstance(this);
++       instrumentations.add(inst);
++      }
++
+     } catch(Exception e) {
+       //Reflection can throw lots of exceptions -- handle them all by 
+       //falling back on the default.
+       LOG.error(
+         "Failed to initialize taskTracker metrics. Falling back to default.",
+         e);
+-      this.myInstrumentation = new TaskTrackerMetricsInst(this);
++      //this.myInstrumentation = new TaskTrackerMetricsInst(this);
++      instrumentations.clear();
++      //instrumentations.add(TaskTrackerInstrumentation.create(this));
++      instrumentations.add(new TaskTrackerMetricsInst(this));
+     }
+     
+     // bind address
+@@ -1014,11 +1033,18 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+     return fConf.getBoolean(JobConf.MR_ACLS_ENABLED, false);
+   }
+ 
++  /*
+   public static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass(
+     Configuration conf) {
+     return conf.getClass("mapred.tasktracker.instrumentation",
+         TaskTrackerMetricsInst.class, TaskTrackerInstrumentation.class);
+   }
++  */
++
++  public static Class<?>[] getInstrumentationClasses(Configuration conf) {
++    return conf.getClasses("mapred.tasktracker.instrumentation",
++                          TaskTrackerInstrumentation.class);
++  }
+ 
+   public static void setInstrumentationClass(
+     Configuration conf, Class<? extends TaskTrackerInstrumentation> t) {
+@@ -1988,7 +2014,10 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+             reduceTotal--;
+           }
+           try {
+-            myInstrumentation.completeTask(taskStatus.getTaskID());
++            //myInstrumentation.completeTask(taskStatus.getTaskID());
++            for (TaskTrackerInstrumentation inst: instrumentations) {
++              inst.completeTask(taskStatus.getTaskID());
++            }
+           } catch (MetricsException me) {
+             LOG.warn("Caught: " + StringUtils.stringifyException(me));
+           }
+@@ -2153,7 +2182,10 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+           LOG.info(tip.getTask().getTaskID() + ": " + msg);
+           ReflectionUtils.logThreadInfo(LOG, "lost task", 30);
+           tip.reportDiagnosticInfo(msg);
+-          myInstrumentation.timedoutTask(tip.getTask().getTaskID());
++          //myInstrumentation.timedoutTask(tip.getTask().getTaskID());
++          for (TaskTrackerInstrumentation inst: instrumentations) {
++            inst.timedoutTask(tip.getTask().getTaskID());
++          }
+           purgeTask(tip, true);
+         }
+       }
+@@ -2259,6 +2291,15 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+       if (tip.getTask().isMapTask()) {
+         indexCache.removeMap(tip.getTask().getTaskID().toString());
+       }
++      // Report the task as killed to Instrumentation objects
++      TaskStatus status = (TaskStatus) tip.getStatus().clone();
++      TaskStatus.State state =
++        (wasFailure ? TaskStatus.State.FAILED : TaskStatus.State.KILLED);
++      status.setRunState(state);
++      for (TaskTrackerInstrumentation inst: instrumentations) {
++        inst.statusUpdate(tip.getTask(), status);
++      }
++
+     }
+   }
+ 
+@@ -2916,6 +2957,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+       LOG.info("Task " + task.getTaskID() + " is done.");
+       LOG.info("reported output size for " + task.getTaskID() +  "  was " + taskStatus.getOutputSize());
+ 
++      for (TaskTrackerInstrumentation inst: instrumentations) {
++        inst.statusUpdate(task, taskStatus);
++      }
+     }
+     
+     public boolean wasKilled() {
+@@ -3120,6 +3164,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+       } catch (IOException ie) {
+       }
+ 
++      for (TaskTrackerInstrumentation inst: instrumentations) {
++        inst.statusUpdate(task, taskStatus);
++      }
+     }
+     
+ 
+@@ -3258,6 +3305,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+       taskStatus.setFinishTime(System.currentTimeMillis());
+       removeFromMemoryManager(task.getTaskID());
+       releaseSlot();
++      for (TaskTrackerInstrumentation inst: instrumentations) {
++        inst.statusUpdate(task, taskStatus);
++      }
+       notifyTTAboutTaskCompletion();
+     }
+     
+@@ -3290,6 +3340,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+                              failure);
+         runningTasks.put(task.getTaskID(), this);
+         mapTotal++;
++        for (TaskTrackerInstrumentation inst: instrumentations) {
++          inst.statusUpdate(task, taskStatus);
++        }
+       } else {
+         LOG.warn("Output already reported lost:"+task.getTaskID());
+       }
+@@ -3473,6 +3526,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
+         return false;
+       }
+       tip.reportProgress(taskStatus);
++      for (TaskTrackerInstrumentation inst: instrumentations) {
++        inst.statusUpdate(tip.getTask(), taskStatus);
++      }
+       return true;
+     } else {
+       LOG.warn("Progress from unknown child task: "+taskid);
+diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java b/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
+index e9e863f..20ecc67 100644
+--- a/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
++++ b/src/mapred/org/apache/hadoop/mapred/TaskTrackerInstrumentation.java
+@@ -62,4 +62,11 @@ class TaskTrackerInstrumentation  {
+    */
+   public void reportTaskEnd(TaskAttemptID t) {}
+    
++  /**
++   * Called when a task changes status.
++   * @param task the task whose status changed
++   * @param taskStatus the new status of the task
++   */
++  public void statusUpdate(Task task, TaskStatus taskStatus) {}
++
+ }

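The thrust of the TaskTracker changes above is to let several
TaskTrackerInstrumentation objects coexist, so the Mesos
instrumentation can be added without displacing the stock metrics one.
Since getInstrumentationClasses() reads a comma-separated class list
via conf.getClasses(), wiring in the new class would look roughly like
this in the configuration (a hypothetical entry; the class names are
ones this commit adds or touches):

  <property>
    <name>mapred.tasktracker.instrumentation</name>
    <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst,org.apache.hadoop.mapred.MesosTaskTrackerInstrumentation</value>
  </property>
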
Added: incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch?rev=1332469&view=auto
==============================================================================
--- incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch (added)
+++ incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_hadoop-env.sh.patch Mon Apr 30 23:28:43 2012
@@ -0,0 +1,13 @@
+diff --git a/conf/hadoop-env.sh b/conf/hadoop-env.sh
+index ada5bef..76aaf48 100644
+--- a/conf/hadoop-env.sh
++++ b/conf/hadoop-env.sh
+@@ -9,7 +9,7 @@
+ # export JAVA_HOME=/usr/lib/j2sdk1.6-sun
+ 
+ # Extra Java CLASSPATH elements.  Optional.
+-# export HADOOP_CLASSPATH="<extra_entries>:$HADOOP_CLASSPATH"
++export HADOOP_CLASSPATH=${HADOOP_HOME}/build/contrib/mesos/classes
+
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ # export HADOOP_HEAPSIZE=2000

Added: incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_mesos.patch
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_mesos.patch?rev=1332469&view=auto
==============================================================================
--- incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_mesos.patch (added)
+++ incubator/mesos/trunk/hadoop/hadoop-0.20.2-cdh3u3_mesos.patch Mon Apr 30 23:28:43 2012
@@ -0,0 +1,22 @@
+diff --git a/ivy/libraries.properties b/ivy/libraries.properties
+index 0b3c715..6b7770b 100644
+--- a/ivy/libraries.properties
++++ b/ivy/libraries.properties
+@@ -75,3 +75,5 @@ slf4j-log4j12.version=1.4.3
+ wagon-http.version=1.0-beta-2
+ xmlenc.version=0.52
+ xerces.version=1.4.4
++
++protobuf-java.version=2.4.1
+diff --git a/src/contrib/build.xml b/src/contrib/build.xml
+index e41c132..593aecd 100644
+--- a/src/contrib/build.xml
++++ b/src/contrib/build.xml
+@@ -57,6 +57,7 @@
+       <fileset dir="." includes="capacity-scheduler/build.xml"/>
+       <fileset dir="." includes="mrunit/build.xml"/>
+       <fileset dir="." includes="gridmix/build.xml"/>
++      <fileset dir="." includes="mesos/build.xml"/>
+     </subant>
+      <available file="${build.contrib.dir}/testsfailed" property="testsfailed"/>
+      <fail if="testsfailed">Tests failed!</fail>