Posted to commits@mesos.apache.org by be...@apache.org on 2011/06/05 10:54:52 UTC

svn commit: r1132176 - in /incubator/mesos/trunk/ec2: ./ deploy.centos64/ deploy.centos64/root/ deploy.centos64/root/ephemeral-hdfs/ deploy.centos64/root/ephemeral-hdfs/conf/ deploy.centos64/root/mesos-ec2/ deploy.centos64/root/mesos-ec2/hadoop-framewo...

Author: benh
Date: Sun Jun  5 08:54:50 2011
New Revision: 1132176

URL: http://svn.apache.org/viewvc?rev=1132176&view=rev
Log:
Added support for CentOS AMIs with Hypertable and both ephemeral and
persistent HDFS instances.

Added:
    incubator/mesos/trunk/ec2/deploy.centos64/
    incubator/mesos/trunk/ec2/deploy.centos64/root/
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/core-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hadoop-env.sh
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hdfs-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/mapred-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/masters
    incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/slaves
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/cluster-url
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/core-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy+apache/
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy+apache/haproxy.config.template
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/masters
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-slave
      - copied, changed from r1132175, incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/slaves
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos   (with props)
    incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/zoo
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/masters
    incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves
Modified:
    incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
    incubator/mesos/trunk/ec2/mesos_ec2.py

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/core-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/core-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/core-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/core-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>/mnt/ephemeral-hdfs</value>
+  </property>
+
+  <property>
+    <name>fs.default.name</name>
+    <value>hdfs://{{active_master}}:9000</value>
+  </property>
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>65536</value>
+  </property>
+
+</configuration>
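
Note on the {{...}} tokens: {{active_master}} here (and {{slave_list}}, {{master_list}}, {{cluster_url}} and {{zoo_list}} in the files below) are deploy-time placeholders; the substitution presumably happens from mesos_ec2.py (modified in this commit) when the deploy.centos64 tree is pushed onto the instances. A minimal shell sketch of the idea, with a hypothetical master hostname:

    # hypothetical stand-in for the template substitution done by mesos_ec2.py
    ACTIVE_MASTER=ec2-1-2-3-4.compute-1.amazonaws.com
    sed -i "s|{{active_master}}|$ACTIVE_MASTER|g" /root/ephemeral-hdfs/conf/core-site.xml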

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hadoop-env.sh?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hadoop-env.sh (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hadoop-env.sh Sun Jun  5 08:54:50 2011
@@ -0,0 +1,59 @@
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=/usr/java/default
+
+# Extra Java CLASSPATH elements.  Optional.
+# export HADOOP_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE=1500
+
+# Extra Java runtime options.  Empty by default.
+# export HADOOP_OPTS=-server
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+# export HADOOP_TASKTRACKER_OPTS=
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+# export HADOOP_CLIENT_OPTS
+
+# Extra ssh options.  Empty by default.
+# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+export HADOOP_LOG_DIR=/mnt/ephemeral-hdfs/logs
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=/var/hadoop/ephemeral-hdfs/pids
+
+# A string representing this instance of hadoop. $USER by default.
+# export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+# export HADOOP_NICENESS=10
+
+ulimit -n 16000

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hdfs-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hdfs-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/hdfs-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>{{hdfs_data_dirs}}</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>25</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>8</value>
+  </property>
+
+</configuration>

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/mapred-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/mapred-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/mapred-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/mapred-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>{{active_master}}:9001</value>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value>4</value>
+    <description>The maximum number of map tasks that will be run
+    simultaneously by a task tracker.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value>2</value>
+    <description>The maximum number of reduce tasks that will be run
+    simultaneously by a task tracker.
+    </description>
+  </property>
+
+</configuration>

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/masters
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/masters?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/masters (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/masters Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{active_master}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/slaves
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/slaves?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/slaves (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/ephemeral-hdfs/conf/slaves Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{slave_list}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key Sun Jun  5 08:54:50 2011
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+ssh $SSH_OPTS {{active_master}} echo -n

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/approve-master-key
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/cluster-url
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/cluster-url?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/cluster-url (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/cluster-url Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{cluster_url}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir Sun Jun  5 08:54:50 2011
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+if [[ "$#" != "1" ]] ; then
+  echo "Usage: copy-dir <dir>"
+  exit 1
+fi
+
+DIR=`readlink -f "$1"`
+DIR=`echo "$DIR"|sed 's@/$@@'`
+DEST=`dirname "$DIR"`
+
+SLAVES=`cat /root/mesos-ec2/slaves`
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+echo "RSYNC'ing $DIR to slaves..."
+for slave in $SLAVES; do
+    echo $slave
+    rsync -e "ssh $SSH_OPTS" -az "$DIR" "$slave:$DEST" & sleep 0.5
+done
+wait
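
copy-dir mirrors a directory from the master to the same path on every slave listed in /root/mesos-ec2/slaves, staggering the rsyncs slightly so a large cluster is not hit all at once. A typical use (the directory name is hypothetical):

    /root/mesos-ec2/copy-dir /root/spark/conf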

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/copy-dir
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/core-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/core-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/core-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/core-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>/mnt/hadoop-framework</value>
+  </property>
+
+  <property>
+    <name>fs.default.name</name>
+    <value>hdfs://{{active_master}}:9000</value>
+  </property>
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>65536</value>
+  </property>
+
+</configuration>

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh Sun Jun  5 08:54:50 2011
@@ -0,0 +1,59 @@
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=/usr/java/default
+
+# Extra Java CLASSPATH elements.  Optional.
+# export HADOOP_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE=1500
+
+# Extra Java runtime options.  Empty by default.
+# export HADOOP_OPTS=-server
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+# export HADOOP_TASKTRACKER_OPTS=
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+# export HADOOP_CLIENT_OPTS
+
+# Extra ssh options.  Empty by default.
+# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+export HADOOP_LOG_DIR=/mnt/hadoop-logs
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HADOOP_PID_DIR=/var/hadoop/pids
+
+# A string representing this instance of hadoop. $USER by default.
+# export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+# export HADOOP_NICENESS=10
+
+ulimit -n 10000

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,81 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>{{active_master}}:9001</value>
+  </property>
+
+  <property>
+    <name>mapred.local.dir</name>
+    <value>{{mapred_local_dirs}}</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.taskScheduler</name>
+    <value>org.apache.hadoop.mapred.MesosScheduler</value>
+  </property>
+
+  <property>
+    <name>mapred.mesos.master</name>
+    <value>{{cluster_url}}</value>
+  </property>
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>65536</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.handler.count</name>
+    <value>20</value>
+  </property>
+
+  <property>
+    <name>tasktracker.http.threads</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <name>mapred.child.java.opts</name>
+    <value>-Xmx1512m</value>
+  </property>
+
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>15</value>
+  </property>
+
+  <property>
+    <name>mapred.mesos.localitywait</name>
+    <value>5000</value>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value>6</value>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value>6</value>
+  </property>
+
+  <property>
+    <name>io.sort.mb</name>
+    <value>200</value>
+    <description>The total amount of buffer memory to use while sorting
+    files, in megabytes.  By default, gives each merge stream 1MB, which
+    should minimize seeks.</description>
+  </property>
+
+</configuration>
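
The two Mesos-specific properties are mapred.jobtracker.taskScheduler, which swaps the stock JobTracker scheduler for MesosScheduler, and mapred.mesos.master, which tells it which Mesos cluster to register with, so TaskTrackers are launched on demand through Mesos resource offers rather than started statically. With these files copied into the framework's conf directory (see the setup script below), the framework would presumably be brought up by launching the JobTracker from the bundled Hadoop, along these lines (the exact invocation is an assumption):

    cd /root/mesos/frameworks/hadoop-0.20.2
    bin/hadoop jobtracker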

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy+apache/haproxy.config.template
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy%2Bapache/haproxy.config.template?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy+apache/haproxy.config.template (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/haproxy+apache/haproxy.config.template Sun Jun  5 08:54:50 2011
@@ -0,0 +1,8 @@
+listen webfarm {{active_master}}:80
+       timeout server 7500
+       timeout client 7500
+       timeout connect 7500
+       mode http
+       balance roundrobin
+       option httpchk HEAD /index.html HTTP/1.0
+       stats uri /stats
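
This template defines only the frontend; it contains no backend "server" lines. The haproxy+apache framework presumably completes the config at runtime by appending one server line per Apache task it launches, along the lines of (hostname, port, and output path all hypothetical):

    # hypothetical line appended for each launched web server
    echo '       server web1 10.0.0.101:80 check' >> /root/haproxy.config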

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile Sun Jun  5 08:54:50 2011
@@ -0,0 +1,417 @@
+set :source_machine, "{{active_master}}"
+set :install_dir,  "/opt/hypertable"
+set :hypertable_version, "0.9.4.2"
+set :default_dfs, "hadoop"
+set :default_config, "/root/mesos-ec2/hypertable/hypertable.cfg"
+set :default_additional_args, ""
+set :hbase_home, "/opt/hbase/current"
+set :default_client_multiplier, 1
+set :default_test_driver, "hypertable"
+set :default_test_args, ""
+
+role :source, "{{active_master}}"
+role :master, "{{active_master}}"
+role :hyperspace, "{{active_master}}"
+open("/root/mesos-ec2/slaves").each do |slave|
+  role :slave, slave.chomp
+end
+role :thriftbroker
+role :spare
+role :localhost, "{{active_master}}"
+role :test_client
+role :test_dispatcher
+
+######################### END OF USER CONFIGURATION ############################
+
+def install_machines
+  (roles[:master].servers | \
+   roles[:hyperspace].servers | \
+   roles[:slave].servers | \
+   roles[:thriftbroker].servers | \
+   roles[:spare].servers | \
+   roles[:test_client].servers | \
+   roles[:test_dispatcher].servers) - roles[:source].servers
+end
+
+role(:install) { install_machines }
+
+set(:dfs) do
+  "#{default_dfs}"
+end unless exists?(:dfs)
+
+set(:config) do
+  "#{default_config}"
+end unless exists?(:config)
+
+set(:additional_args) do
+  "#{default_additional_args}"
+end unless exists?(:additional_args)
+
+set(:test_driver) do
+  "#{default_test_driver}"
+end unless exists?(:test_driver)
+
+set(:test_args) do
+  "#{default_test_args}"
+end unless exists?(:test_args)
+
+set(:client_multiplier) do
+  "#{default_client_multiplier}".to_i
+end unless exists?(:client_multiplier)
+
+set :config_file, "#{config}".split('/')[-1]
+set :config_option, \
+    "--config=#{install_dir}/#{hypertable_version}/conf/#{config_file}"
+
+ desc <<-DESC
+    Copies config file to installation on localhost.
+    This task runs on localhost and copies the config file specified \
+    by the variable 'config' (default=#{config}) \
+    to the installation directory specified by the variable 'install_dir' \
+    (default=#{install_dir})
+ DESC
+task :copy_config, :roles => :localhost do
+  run("rsync #{config} #{install_dir}/#{hypertable_version}/conf/")
+end
+
+ desc <<-DESC
+    Rsyncs the installation directory to the cluster.  For each machine in the \
+    cluster, this command rsyncs the installation from the source \
+    installation machine specified by the variable 'source_machine' \
+    (default=#{source_machine})
+ DESC
+task :rsync, :roles => :install do
+  run <<-CMD
+     rsync -av --exclude=log --exclude=run --exclude=demo --exclude=fs --exclude=hyperspace/ #{source_machine}:#{install_dir}/#{hypertable_version} #{install_dir} &&
+     rsync -av --exclude=log --exclude=run --exclude=demo --exclude=fs --exclude=hyperspace/ #{source_machine}:#{install_dir}/#{hypertable_version}/conf/ #{install_dir}/#{hypertable_version}/conf
+  CMD
+end
+
+ desc <<-DESC
+    sets up the symbolic link 'current' in the installation area \
+    to point to the directory of the current version
+    (default=#{hypertable_version})
+ DESC
+task :set_current, :roles => [:install, :source] do
+  run <<-CMD
+   cd #{install_dir} &&
+   rm -f current &&
+   ln -s #{hypertable_version} current
+  CMD
+end
+
+ desc <<-DESC
+    Distributes installation.  This task copies the config file and \
+    then rsyncs the installation to each machine in the cluster
+ DESC
+task :dist do
+  transaction do
+    copy_config
+    rsync
+  end
+end
+
+ desc <<-DESC
+    Distributes and fhsizes the installation, then copies
+    config and rsyncs
+ DESC
+task :fhsize do
+  transaction do
+    rsync
+    fhsize_install
+    copy_config
+    rsync
+  end
+end
+
+
+ desc <<-DESC
+    fhsizes the installations
+ DESC
+task :fhsize_install, :roles => [:install, :source] do
+  run <<-CMD
+     #{install_dir}/#{hypertable_version}/bin/fhsize.sh
+  CMD
+end
+
+desc "Verify that upgrade is OK."
+task :qualify_upgrade, :roles => :source do
+  run <<-CMD
+     #{install_dir}/#{hypertable_version}/bin/upgrade-ok.sh #{install_dir}/current #{hypertable_version}
+  CMD
+end
+
+ desc <<-DESC
+    Upgrades installation.  Stops servers, copies config, rsyncs
+    the installation, then copies hyperspace and the rangeserver
+    state in the run/ directory to new installation
+ DESC
+task :upgrade do
+  transaction do
+    qualify_upgrade
+    stop
+    copy_config
+    rsync
+    upgrade_hyperspace
+    upgrade_rangeservers
+    set_current
+  end
+end
+
+ desc <<-DESC
+    Upgrades (copies) the Hyperspace database from the current
+    installation to the new installation specified by the
+    hypertable_version (#{hypertable_version})
+ DESC
+task :upgrade_hyperspace, :roles => :hyperspace do
+  run <<-CMD
+    cp -dpR #{install_dir}/current/hyperspace \
+       #{install_dir}/#{hypertable_version}
+  CMD
+end
+
+ desc <<-DESC
+    Upgrades (copies) the RangeServers by copying the contents
+    of the run directory from the current installation to
+    installation specified by the hypertable_version
+    (#{hypertable_version})
+ DESC
+task :upgrade_rangeservers, :roles => :slave do
+  run <<-CMD
+    cp -dpR #{install_dir}/current/run \
+       #{install_dir}/#{hypertable_version}
+  CMD
+end
+
+desc "Starts all processes."
+task :start do
+  transaction do
+    start_hyperspace
+    start_master
+    start_slaves
+  end
+end
+
+desc "Starts hyperspace processes."
+task :start_hyperspace, :roles => :hyperspace do
+  run <<-CMD
+   #{install_dir}/current/bin/start-hyperspace.sh \
+      #{config_option}
+  CMD
+end
+
+desc "Starts master processes."
+task :start_master, :roles => :master do
+  run <<-CMD
+   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
+      #{config_option} &&
+   #{install_dir}/current/bin/start-master.sh #{config_option} &&
+   #{install_dir}/current/bin/start-monitoring.sh
+  CMD
+end
+
+desc "Starts slave processes."
+task :start_slaves, :roles => :slave do
+  run <<-CMD
+   #{install_dir}/current/bin/random-wait.sh 5 &&
+   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
+      #{config_option} &&
+   #{install_dir}/current/bin/start-rangeserver.sh \
+      #{config_option} &&
+   #{install_dir}/current/bin/start-thriftbroker.sh \
+      #{config_option}
+  CMD
+end
+
+desc "Starts ThriftBroker processes."
+task :start_thriftbrokers, :roles => :thriftbroker do
+  run <<-CMD
+   #{install_dir}/current/bin/random-wait.sh 5 &&
+   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
+      #{config_option} &&
+   #{install_dir}/current/bin/start-thriftbroker.sh \
+      #{config_option}
+  CMD
+end
+
+
+desc "Starts DFS brokers."
+task :start_dfsbrokers, :roles => [:master, :slave] do
+  run "#{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
+      #{config_option}"
+end
+
+desc "Stops all servers."
+task :stop do
+  transaction do
+    stop_slaves
+    stop_master
+    stop_hyperspace
+  end
+end
+
+desc "Stops slave processes."
+task :stop_slaves, :roles => :slave do
+  run <<-CMD
+  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master #{additional_args}
+  CMD
+end
+
+desc "Stops master processes."
+task :stop_master, :roles => :master do
+  run <<-CMD
+  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-rangeserver #{additional_args} &&
+  #{install_dir}/current/bin/stop-monitoring.sh
+  CMD
+end
+
+desc "Stops hyperspace processes."
+task :stop_hyperspace, :roles => :hyperspace do
+  run <<-CMD 
+  #{install_dir}/current/bin/stop-hyperspace.sh
+  CMD
+end
+
+desc "Stops ThriftBroker processes."
+task :stop_thriftbrokers, :roles => :thriftbroker do
+  run <<-CMD 
+  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master --no-rangeserver
+  CMD
+end
+
+desc "Cleans hyperspace & rangeservers, removing all tables."
+task :cleandb do
+  transaction do
+    clean_master
+    clean_hyperspace
+    clean_slaves
+  end  
+end
+
+desc "Cleans master state but not hyperspace."
+task :clean_master, :roles => :master do
+  run <<-CMD
+   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
+      #{config_option} && \
+   #{install_dir}/current/bin/clean-database.sh #{config_option} ;
+  CMD
+end
+
+desc "Cleans hyperspace."
+task :clean_hyperspace, :roles => :hyperspace do
+  run <<-CMD
+   #{install_dir}/current/bin/clean-hyperspace.sh
+  CMD
+end
+
+desc "Cleans rangeservers and master state but not hyperspace."
+task :clean_slaves, :roles => :slave do
+  run <<-CMD
+   #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master &&
+   rm -rf #{install_dir}/current/run/*
+  CMD
+end
+
+desc "Reports status for all processes."
+task :status do
+  transaction do
+    dfs_status
+    master_status
+    hyperspace_status
+    rangeserver_status
+  end
+end
+
+desc "Get status for dfs processes."
+task :dfs_status, :roles => [:master, :slave] do
+  run <<-CMD
+   #{install_dir}/current/bin/ht serverup dfsbroker
+  CMD
+end
+
+desc "Get status for Hypertable.Master process."
+task :master_status, :roles => [:master] do
+  run <<-CMD
+   #{install_dir}/current/bin/ht serverup master
+  CMD
+end
+
+desc "Get status for Hyperspace.Master process."
+task :hyperspace_status, :roles => [:hyperspace] do
+  run <<-CMD
+   #{install_dir}/current/bin/ht serverup hyperspace
+  CMD
+end
+
+desc "Get status for rangeserver processes."
+task :rangeserver_status, :roles => [:slave] do
+  run <<-CMD
+   #{install_dir}/current/bin/ht serverup rangeserver
+  CMD
+end
+
+set :default_dumpfile, "/tmp/rsdump.txt"
+
+set(:dumpfile) do
+  "#{default_dumpfile}"
+end unless exists?(:dumpfile)
+
+desc "Run dump command on each rangeserver"
+task :rangeserver_dump, :roles => [:slave] do
+  run <<-CMD
+   echo "dump NOKEYS '#{dumpfile}';" | #{install_dir}/current/bin/ht ht_rsclient --batch #{config_option}
+  CMD
+end
+
+
+if "#{test_driver}" == "hypertable"
+  set :thrift_broker_command, "#{install_dir}/current/bin/start-thriftbroker.sh #{config_option}"
+  set :start_test_client_command, "#{install_dir}/current/bin/start-test-client.sh --count #{client_multiplier} #{roles[:test_dispatcher].servers[0]}"
+  set :run_test_dispatcher_command, "#{install_dir}/current/bin/jrun --pidfile #{install_dir}/#{hypertable_version}/run/Hypertable.TestDispatcher.pid org.hypertable.examples.PerformanceTest.Dispatcher --driver=#{test_driver} --clients=#{roles[:test_client].servers.length*client_multiplier} #{test_args}"
+  set :stop_test_args, ""
+elsif "#{test_driver}" == "hbase"
+  set :thrift_broker_command, "echo -n"
+  set :start_test_client_command, "#{install_dir}/current/bin/start-test-client.sh --jrun-opts \"--add-to-classpath #{hbase_home}/conf\" --count #{client_multiplier} #{roles[:test_dispatcher].servers[0]}"
+  set :run_test_dispatcher_command, "#{install_dir}/current/bin/jrun --pidfile #{install_dir}/#{hypertable_version}/run/Hypertable.TestDispatcher.pid --add-to-classpath #{hbase_home}/conf org.hypertable.examples.PerformanceTest.Dispatcher --driver=#{test_driver} --clients=#{roles[:test_client].servers.length*client_multiplier} #{test_args}"
+  set :stop_test_args, "--no-thriftbroker --no-dfsbroker"
+else
+  set :thrift_broker_command, "echo Invalid test driver - #{test_driver}"
+  set :start_test_client_command, "echo Invalid test driver - #{test_driver}"
+  set :run_test_dispatcher_command, "echo Invalid test driver - #{test_driver}"
+  set :stop_test_args, "--no-thriftbroker --no-dfsbroker"
+end
+
+desc "Starts test clients."
+task :start_test_clients, :roles => :test_client do
+  run <<-CMD
+   #{install_dir}/current/bin/random-wait.sh 5 &&
+   #{thrift_broker_command} &&
+   #{start_test_client_command}
+  CMD
+end
+
+desc "Run test dispatcher."
+task :run_test_dispatcher, :roles => :test_dispatcher do
+  run <<-CMD
+   #{thrift_broker_command} &&
+   #{run_test_dispatcher_command}
+  CMD
+end
+
+desc "Stops test."
+task :stop_test, :roles => [:test_client, :test_dispatcher] do
+  run <<-CMD
+  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master --no-rangeserver #{stop_test_args}
+  CMD
+end
+
+desc "Run test"
+task :run_test do
+  transaction do
+    stop_test
+    start_test_clients
+    run_test_dispatcher
+  end
+end
+
+
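
start-hypertable and stop-hypertable (below) drive this Capfile with the defaults (cap dist && cap start / cap stop), but any of the variables can be overridden per invocation with Capistrano's -S flag, for example (values hypothetical):

    cd /root/mesos-ec2/hypertable
    cap -S test_driver=hbase -S client_multiplier=2 run_test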

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg Sun Jun  5 08:54:50 2011
@@ -0,0 +1,43 @@
+#
+# hypertable.cfg
+#
+
+# HDFS Broker
+HdfsBroker.Port=38030
+HdfsBroker.fs.default.name=hdfs://{{active_master}}:9010
+HdfsBroker.Workers=20
+
+# Ceph Broker
+CephBroker.Port=38030
+CephBroker.Workers=20
+CephBroker.MonAddr=10.0.1.245:6789
+
+# Local Broker
+DfsBroker.Local.Port=38030
+DfsBroker.Local.Root=fs/local
+
+# DFS Broker - for clients
+DfsBroker.Host=localhost
+DfsBroker.Port=38030
+
+# Hyperspace
+Hyperspace.Replica.Host={{active_master}}
+Hyperspace.Replica.Port=38040
+Hyperspace.Replica.Dir=hyperspace
+Hyperspace.Replica.Workers=20
+
+# Hypertable.Master
+Hypertable.Master.Host={{active_master}}
+Hypertable.Master.Port=38050
+Hypertable.Master.Workers=20
+
+
+# Hypertable.RangeServer
+Hypertable.RangeServer.Port=38060
+
+Hyperspace.KeepAlive.Interval=30000
+Hyperspace.Lease.Interval=1000000
+Hyperspace.GracePeriod=200000
+
+# ThriftBroker
+ThriftBroker.Port=38080

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/masters
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/masters?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/masters (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/masters Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{master_list}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon Sun Jun  5 08:54:50 2011
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Set up MESOS_HOME in order to find projd
+export MESOS_HOME=/root/mesos
+
+# Set MESOS_PUBLIC_DNS so slaves can be linked in master web UI
+export MESOS_PUBLIC_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/public-hostname`
+
+# Set PATH to include Scala
+export PATH=$PATH:/root/scala-2.8.0.final/bin
+
+# Set HADOOP_HOME variable to allow slaves to get executors from HDFS
+export HADOOP_HOME=/root/hadoop-0.20.2
+
+ulimit -n 8192
+
+PROGRAM=$1
+shift
+
+EXTRA_OPTS=""
+if [ "$PROGRAM" == "mesos-slave" ]; then
+  # Compute CPU resources (if not specified).
+  if [[ "$*" != *--cpus* ]]; then
+    CPUS=`grep processor /proc/cpuinfo | wc -l`
+    EXTRA_OPTS="$EXTRA_OPTS --cpus=$CPUS"
+  fi
+
+  # Compute memory resources (if not specified).
+  if [[ "$*" != *--mem* ]]; then
+    MEM_KB=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
+    MEM=$[(MEM_KB - 1024 * 1024) / 1024]
+    EXTRA_OPTS="$EXTRA_OPTS --mem=$MEM"
+  fi
+fi
+
+cd $MESOS_HOME/bin
+nohup ./$PROGRAM --log_dir=/mnt/mesos-logs --work_dir=/mnt/mesos-work $EXTRA_OPTS $@ </dev/null >/mnt/mesos-logs/$PROGRAM.out 2>&1 &
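
The memory default above leaves roughly 1 GB for the OS and other daemons: MEM = (MemTotal_kB - 1024*1024) / 1024, expressed in MB. For example, a slave whose /proc/meminfo reports MemTotal of 7864320 kB (about 7.5 GB) would be started with --mem set to (7864320 - 1048576) / 1024 = 6656 MB.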

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos Sun Jun  5 08:54:50 2011
@@ -0,0 +1,24 @@
+#!/bin/bash
+cd /root/mesos-ec2
+
+MASTERS=`cat masters`
+NUM_MASTERS=`cat masters | wc -l`
+SLAVES=`cat slaves`
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+if [[ $NUM_MASTERS -gt 1 ]]; then
+  echo "RSYNC'ing /root/mesos to masters..."
+  for master in $MASTERS; do
+    echo $master
+    rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $master:/root & sleep 0.3
+  done
+  wait
+fi
+
+echo "RSYNC'ing /root/mesos to slaves..."
+for slave in $SLAVES; do
+  echo $slave
+  rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $slave:/root & sleep 0.3
+done
+wait

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup Sun Jun  5 08:54:50 2011
@@ -0,0 +1,262 @@
+#!/bin/bash
+
+# Make sure we are in the mesos-ec2 directory
+cd /root/mesos-ec2
+
+# Set hostname based on EC2 private DNS name, so that it is set correctly
+# even if the instance is restarted with a different private DNS name
+PRIVATE_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/local-hostname`
+hostname $PRIVATE_DNS
+echo $PRIVATE_DNS > /etc/hostname
+export HOSTNAME=$PRIVATE_DNS  # Fix the bash built-in hostname variable too
+
+echo "Setting up Mesos master on `hostname`..."
+
+# Read command-line arguments
+OS_NAME=$1
+DOWNLOAD_METHOD=$2
+BRANCH=$3
+
+MASTERS_FILE="masters"
+MASTERS=`cat $MASTERS_FILE`
+NUM_MASTERS=`cat $MASTERS_FILE | wc -l`
+SLAVES=`cat slaves`
+ZOOS=`cat zoo`
+
+if [[ $ZOOS = *NONE* ]]; then
+  NUM_ZOOS=0
+  ZOOS=""
+else
+  NUM_ZOOS=`cat zoo | wc -l`
+fi
+
+# Scripts that get used for/while running Mesos.
+SCRIPTS="approve-master-key
+         copy-dir
+         mesos-daemon
+         redeploy-mesos
+         setup-slave
+         ssh-no-keychecking
+         start-hypertable
+         start-mesos
+         stop-hypertable
+         stop-mesos"
+
+EPHEMERAL_HDFS=/root/ephemeral-hdfs
+PERSISTENT_HDFS=/root/persistent-hdfs
+
+#TODO(*): update config scripts to have conditionals for handling different
+#         platforms
+JAVA_HOME=/usr/java/default
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+if [[ `tty` == "not a tty" ]] ; then
+    echo "Expecting a tty or pty! (use the ssh -t option)."
+    exit 1
+fi
+
+echo "Setting executable permissions on scripts..."
+for s in $SCRIPTS; do chmod u+x $s; done
+
+echo "Running setup-slave on master to mount filesystems, etc..."
+./setup-slave
+
+echo "SSH'ing to master machine(s) to approve key(s)..."
+for master in $MASTERS; do
+  echo $master
+  ssh $SSH_OPTS $master echo -n &
+  sleep 0.3
+done
+ssh $SSH_OPTS localhost echo -n &
+ssh $SSH_OPTS `hostname` echo -n &
+wait
+
+if [[ $NUM_ZOOS != 0 ]] ; then
+  echo "SSH'ing to ZooKeeper server(s) to approve keys..."
+  zid=1
+  for zoo in $ZOOS; do
+    echo $zoo
+    ssh $SSH_OPTS $zoo echo -n \; mkdir -p /tmp/zookeeper \; echo $zid \> /tmp/zookeeper/myid &
+    zid=$(($zid+1))
+    sleep 0.3
+  done
+fi
+
+echo "SSH'ing to slaves to approve keys..."
+for slave in $SLAVES; do
+  echo $slave
+  ssh $SSH_OPTS $slave echo -n &
+  sleep 0.3
+done
+
+echo "Waiting for ssh commands to finish..."
+wait
+
+if [[ $NUM_MASTERS -gt 1 ]] ; then
+  echo "RSYNC'ing /root/mesos-ec2 to other master servers..."
+  for master in `cat $MASTERS_FILE | sed '1d'`; do
+      echo $master
+      rsync -e "ssh $SSH_OPTS" -az /root/mesos-ec2 $master:/root & sleep 0.3
+  done
+  wait
+fi
+
+if [[ $NUM_ZOOS != 0 ]] ; then
+  echo "RSYNC'ing /root/mesos-ec2 to ZooKeeper servers..."
+  for zoo in $ZOOS; do
+      echo $zoo
+      rsync -e "ssh $SSH_OPTS" -az /root/mesos-ec2 $zoo:/root & sleep 0.3
+  done
+  wait
+fi
+
+echo "RSYNC'ing /root/mesos-ec2 to slaves..."
+for slave in $SLAVES; do
+  echo $slave
+  rsync -e "ssh $SSH_OPTS" -az /root/mesos-ec2 $slave:/root &
+  scp $SSH_OPTS ~/.ssh/id_rsa $slave:.ssh &
+  sleep 0.3
+done
+wait
+
+echo "Running slave setup script on slave and zookeeper nodes..."
+for node in $SLAVES $ZOOS; do
+  echo $node
+  ssh -t $SSH_OPTS root@$node "mesos-ec2/setup-slave" & sleep 0.3
+done
+wait
+
+if [[ $NUM_MASTERS -gt 1 ]] ; then
+  echo "Running slave setup script on other masters..."
+  for master in `cat $MASTERS_FILE | sed '1d'`; do
+    echo $master
+    rsync -e "ssh $SSH_OPTS" mesos-ec2/setup-slave & sleep 0.3
+  done
+  wait
+  echo "RSYNC'ing HDFS config files to other masters..."
+  for master in `cat $MASTERS_FILE | sed '1d'`; do
+    echo $master
+    rsync -e "ssh $SSH_OPTS" -az $EPHEMERAL_HDFS/conf $master:$EPHEMERAL_HDFS &
+    rsync -e "ssh $SSH_OPTS" -az $PERSISTENT_HDFS/conf $master:$PERSISTENT_HDFS &
+    sleep 0.3
+  done
+  wait
+fi
+
+echo "RSYNC'ing HDFS config files to slaves..."
+for slave in $SLAVES; do
+  echo $slave
+  rsync -e "ssh $SSH_OPTS" -az $EPHEMERAL_HDFS/conf $slave:$EPHEMERAL_HDFS &
+  rsync -e "ssh $SSH_OPTS" -az $PERSISTENT_HDFS/conf $slave:$PERSISTENT_HDFS &
+  sleep 0.3
+done
+wait
+
+DOWNLOADED=0
+
+if [[ "$DOWNLOAD_METHOD" == "git" ]] ; then
+  # change git's ssh command so it does not ask to accept keys
+  export GIT_SSH=/root/mesos-ec2/ssh-no-keychecking
+  REPOSITORY=git://github.com/mesos/mesos.git
+  echo "Checking out Mesos from $REPOSITORY"
+  pushd /root > /dev/null 2>&1
+  rm -rf mesos mesos.tgz
+  # Set git SSH command to a script that uses -o StrictHostKeyChecking=no
+  git clone $REPOSITORY mesos
+  pushd mesos 2>&1
+  git checkout -b $BRANCH --track origin/$BRANCH
+  popd > /dev/null 2>&1
+  popd > /dev/null 2>&1
+  DOWNLOADED=1
+fi
+
+# Build Mesos if we downloaded it
+if [[ "$DOWNLOADED" == "1" ]] ; then
+  echo "Building Mesos..."
+  pushd /root/mesos > /dev/null 2>&1
+  ./configure.template.ubuntu-lucid-64
+  make clean
+  make
+  popd > /dev/null 2>&1
+  if [ -d /root/spark ] ; then
+    echo "Building Spark..."
+    pushd /root/spark > /dev/null 2>&1
+    MESOS_HOME=/root/mesos make all native
+    popd > /dev/null 2>&1
+  fi
+  echo "Building Hadoop framework..."
+  pushd /root/mesos/frameworks/hadoop-0.20.2 > /dev/null 2>&1
+  ant
+  ant examples
+  popd > /dev/null 2>&1
+fi
+
+echo "Setting up Hadoop framework config files..."
+cp hadoop-framework-conf/* /root/mesos/frameworks/hadoop-0.20.2/conf
+
+echo "Setting up haproxy+apache framework config files..."
+cp haproxy+apache/* /root/mesos/frameworks/haproxy+apache
+
+echo "Setting up Spark config files..."
+# TODO: This currently overwrites whatever the user wrote there; on
+# the other hand, we also don't want to leave an old file created by
+# us because it would have the wrong hostname for HDFS etc
+mkdir -p /root/spark/conf
+echo "-Dspark.dfs=hdfs://$HOSTNAME:9000" \
+     > /root/spark/conf/java-opts
+
+echo "Redeploying /root/mesos..."
+./redeploy-mesos
+
+echo "Setting up NFS..."
+if [ ! -e /nfs ] ; then
+  mkdir -p /mnt/nfs
+  rm -fr /nfs
+  ln -s /mnt/nfs /nfs
+fi
+if ! grep -e '^/nfs ' /etc/exports; then
+  echo "/nfs    10.0.0.0/8(ro,async,no_subtree_check)" >> /etc/exports
+fi
+/sbin/service portmap start
+/sbin/service nfs start
+# Unexport and re-export everything in /etc/exports because, if we are
+# restarting a stopped EC2 instance, we might have had an entry for /nfs in
+# /etc/exports before we created /mnt/nfs.
+exportfs -ua
+exportfs -a
+
+echo "Mounting NFS on slaves..."
+for slave in $SLAVES; do
+  echo $slave
+  ssh -t $SSH_OPTS root@$slave "mkdir -p /nfs; service portmap start; service nfs start; mount $HOSTNAME:/nfs /nfs" & sleep 0.3
+done
+wait
+
+echo "Formatting ephemeral HDFS namenode..."
+$EPHEMERAL_HDFS/bin/hadoop namenode -format
+
+echo "Starting ephemeral HDFS..."
+$EPHEMERAL_HDFS/bin/start-dfs.sh
+
+if [[ ! -e /vol/persistent-hdfs/dfs/name ]] ; then
+  echo "Formatting persistent HDFS namenode..."
+  $PERSISTENT_HDFS/bin/hadoop namenode -format
+fi
+
+echo "Starting persistent HDFS..."
+$PERSISTENT_HDFS/bin/start-dfs.sh
+
+sleep 1
+
+if [[ $NUM_ZOOS != 0 ]]; then
+  echo "Starting ZooKeeper quorum..."
+  for zoo in $ZOOS; do
+    ssh $SSH_OPTS $zoo "/root/mesos/third_party/zookeeper-*/bin/zkServer.sh start </dev/null >/dev/null" & sleep 0.1
+  done
+  wait
+  sleep 2
+fi
+
+echo "Starting Mesos cluster..."
+./start-mesos
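
setup is meant to be invoked on the master over ssh with a tty (hence the tty check near the top), taking the OS name, download method, and branch as positional arguments, for example (argument values hypothetical):

    ssh -t root@<master-hostname> "mesos-ec2/setup centos64 git master"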

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup
------------------------------------------------------------------------------
    svn:executable = *

Copied: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-slave (from r1132175, incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave)
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-slave?p2=incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-slave&p1=incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave&r1=1132175&r2=1132176&rev=1132176&view=diff
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave (original)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-slave Sun Jun  5 08:54:50 2011
@@ -21,13 +21,14 @@ XFS_MOUNT_OPTS="defaults,noatime,nodirat
 # (for example /mnt, /mnt2, and so on)
 function create_hadoop_dirs {
   location=$1
-  mkdir -p $location/hdfs/dfs $location/hadoop/tmp
+  mkdir -p $location/ephemeral-hdfs/data $location/hadoop/tmp
   mkdir -p $location/hadoop/mrlocal $location/hadoop/mrlocal2
 }
 
 # Set up Hadoop and Mesos directories in /mnt
 create_hadoop_dirs /mnt
-mkdir -p /mnt/hdfs-logs
+mkdir -p /mnt/ephemeral-hdfs/logs
+mkdir -p /mnt/persistent-hdfs/logs
 mkdir -p /mnt/hadoop-logs
 mkdir -p /mnt/mesos-logs
 mkdir -p /mnt/mesos-work
@@ -74,3 +75,6 @@ fi
 # Remove ~/.ssh/known_hosts because it gets polluted as you start/stop many
 # clusters (new machines tend to come up under old hostnames)
 rm -f /root/.ssh/known_hosts
+
+# SSH to the master to approve its key (needed for Capistrano to work)
+/root/mesos-ec2/approve-master-key

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque Sun Jun  5 08:54:50 2011
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+cd /root/mesos-ec2
+
+MASTERS=`cat masters`
+SLAVES_FILE="slaves"
+SLAVES=`cat $SLAVES_FILE`
+
+SCHEDULER_ITERATION=5
+
+#These seem to be broken, i.e. missing directories after install
+#ssh $MASTERS "apt-get install -y torque-server"
+#ssh $MASTERS "apt-get install -y torque-scheduler"
+#ssh $MASTERS "apt-get install -y torque-client"
+
+#install torque: download/unzip torque
+function installtorque {
+	pushd ~
+	echo "downloading and installing torque on master"
+	#wget http://www.clusterresources.com/downloads/torque/torque-2.4.7.tar.gz
+	rm -rf torque-2.4.7.tar.gz
+	wget http://mesos.berkeley.edu/torque-2.4.7.tar.gz
+	tar xzf torque-2.4.7.tar.gz
+	pushd torque-2.4.7
+	./configure --prefix=/usr
+	make -j8
+	make install
+	popd;popd
+}
+
+function setuptorque {
+	pushd ~/torque-2.4.7
+	echo "running ldconfig on master"
+	ldconfig
+        #./torque.setup root # Note: sets some defaults for batch queue
+	qterm
+        yes|./torque.setup root localhost # Note: sets some defaults for batch queue
+
+	#WARNING: allow root to qsub for debug purposes only, may be dangerous
+	qmgr -c 'set server acl_roots+=root@*' #allow root to submit jobs
+	qmgr -c "set server scheduler_iteration=$SCHEDULER_ITERATION"
+	#qmgr -c 's s allow_node_submit=true' #other hosts can submit too
+
+	NUM_SLAVES=`cat ~/mesos-ec2/slaves|wc -l`
+	#the server must be restarted after this
+	qmgr -c "set queue batch resources_available.nodect=$NUM_SLAVES"
+	#qmgr -c "set server resources_available.nodect=$NUM_SLAVES"
+	qterm
+        pbs_server
+
+	touch ~/.rhosts
+	echo `hostname` |cat >> ~/.rhosts
+	echo `hostname -f` |cat >> ~/.rhosts
+	echo localhost |cat >> ~/.rhosts
+
+	popd
+}
+
+
+function installslaves {
+	pushd ~/torque-2.4.7
+	echo "building packages for slave"
+	make packages
+	#install torque-mom on slave nodes
+	apt-get install -y dsh
+	
+        echo "copying slave install packages to nfs"
+	mkdir -p /nfs/torque
+	cp torque-package-mom-linux-x86_64.sh /nfs/torque/torque-package-mom-linux-x86_64.sh
+	cp torque-package-clients-linux-x86_64.sh /nfs/torque/torque-package-clients-linux-x86_64.sh
+
+	echo "installing torque mom and clients package on slaves"
+	for i in `cat $SLAVES_FILE`; do ssh $i "/nfs/torque/torque-package-mom-linux-x86_64.sh --install; ldconfig"; done
+	for i in `cat $SLAVES_FILE`; do ssh $i "/nfs/torque/torque-package-clients-linux-x86_64.sh --install; ldconfig"; done
+
+	echo "Running ldconfig on slaves"
+	dsh -f $SLAVES_FILE ldconfig
+	popd
+}
+
+function installmpi {
+        #setup mpich2 on all of the cluster nodes
+        ./setup-mpi
+
+        #setup prologue script
+        cp ./prologue.setup-mpi-master /var/spool/torque/mom_priv/prologue
+        cp ./epilogue.kill-mpi-ring /var/spool/torque/mom_priv/epilogue
+        
+	for i in `cat $SLAVES_FILE`; do scp ./prologue.setup-mpi-master $i:/var/spool/torque/mom_priv/prologue; done
+	for i in `cat $SLAVES_FILE`; do scp ./epilogue.kill-mpi-ring $i:/var/spool/torque/mom_priv/epilogue; done
+}
+
+function installmaui {
+	pushd ~
+	#http://www.clusterresources.com/download/maui/maui-3.3.tar.gz
+	rm -rf mesos-maui-3.3.tar
+	wget http://mesos.berkeley.edu/mesos-maui-3.3.tar
+	tar -xf mesos-maui-3.3.tar
+	pushd maui-3.3
+	./configure
+	make
+	make install
+	/usr/local/maui/sbin/maui
+	popd;popd
+}
+
+installtorque
+setuptorque
+installslaves
+installmpi
+installmaui
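
Once setup-torque finishes, the Torque server and Maui scheduler should be running on the master with root allowed to submit to the batch queue, so a quick smoke test might look like this (the job itself is hypothetical):

    echo "sleep 30" | qsub -l nodes=2
    qstat -a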

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/setup-torque
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/slaves
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/slaves?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/slaves (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/slaves Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{slave_list}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking Sun Jun  5 08:54:50 2011
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# Utility script that exec's SSH without key checking so that we can check
+# out code from GitHub without prompting the user.
+
+exec ssh -o StrictHostKeyChecking=no $@

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable Sun Jun  5 08:54:50 2011
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+cd /root/mesos-ec2/hypertable
+cap dist && cap start

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-hypertable
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos Sun Jun  5 08:54:50 2011
@@ -0,0 +1,54 @@
+#!/bin/bash
+cd /root/mesos-ec2
+
+MASTERS=`cat masters`
+ACTIVE_MASTER=`cat masters | head -1`
+SLAVES=`cat slaves`
+ZOOS=`cat zoo`
+
+
+if [[ $ZOOS = *NONE* ]]; then
+  NUM_ZOOS=0
+else
+  NUM_ZOOS=`cat zoo | wc -l`
+fi
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+cluster_url=`cat cluster-url`
+
+echo "Running with cluster URL: "$cluster_url
+
+if [[ $NUM_ZOOS != 0 ]]; then
+  masterid=1
+  for master in $MASTERS; do
+    echo "Starting master $masterid on $master"
+    ssh $SSH_OPTS $master "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 -u $cluster_url $@ </dev/null >/dev/null" & sleep 0.3
+    masterid=$(($masterid+1))
+  done
+  wait
+else
+  echo "Starting master on $ACTIVE_MASTER"
+  ssh $SSH_OPTS $ACTIVE_MASTER "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 $@ </dev/null >/dev/null"
+fi
+
+sleep 2
+
+for slave in $SLAVES; do
+  echo "Starting slave on $slave"
+  ssh $SSH_OPTS $slave "/root/mesos-ec2/mesos-daemon mesos-slave -u ${cluster_url} </dev/null >/dev/null" &
+  sleep 0.3
+done
+wait
+
+if [[ $NUM_ZOOS != 0 ]]; then
+  echo "ZooKeeper is running at"
+  for zoo in $ZOOS; do
+    echo "      $zoo:2181"
+  done
+fi
+
+echo "Everything's started! You can view the master Web UI at"
+for master in $MASTERS; do
+  echo "      http://$master:8080"
+done

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/start-mesos
------------------------------------------------------------------------------
    svn:executable = *
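
Since start-mesos backgrounds one ssh per node and only waits for the
shells to return, a quick liveness probe afterwards can be worthwhile.
A minimal sketch using the web UI port the script itself reports (8080):

    # Probe each master's web UI; hostnames come from the masters file.
    for master in $(cat /root/mesos-ec2/masters); do
      if curl --silent --max-time 5 "http://$master:8080" > /dev/null; then
        echo "$master: web UI reachable"
      else
        echo "$master: web UI not responding"
      fi
    done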

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable Sun Jun  5 08:54:50 2011
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+cd /root/mesos-ec2/hypertable
+cap stop

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos Sun Jun  5 08:54:50 2011
@@ -0,0 +1,21 @@
+#!/bin/bash
+cd /root/mesos-ec2
+
+MASTERS=`cat masters`
+SLAVES=`cat slaves`
+
+SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
+
+for slave in $SLAVES; do
+  echo "Stopping slave on $slave"
+  ssh $SSH_OPTS $slave pkill mesos-slave &
+  sleep 0.1
+done
+wait
+
+for master in $MASTERS; do
+  echo "Stopping master on $master"
+  ssh $SSH_OPTS $master pkill mesos-master &
+  sleep 0.1
+done
+wait

Propchange: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/stop-mesos
------------------------------------------------------------------------------
    svn:executable = *
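
stop-mesos depends on pkill matching the daemon process names, so a
follow-up check that a host is actually clean can be useful. A sketch
(the host variable is assumed to hold a name from the masters or slaves
file):

    ssh -o StrictHostKeyChecking=no $host \
      'pgrep -l mesos || echo "no mesos processes left"'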

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/zoo
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/zoo?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/zoo (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/mesos-ec2/zoo Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{zoo_list}}
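
As start-mesos above shows, the rendered zoo file either lists one
ZooKeeper host per line or carries the sentinel NONE. The two shapes,
with hypothetical hostnames:

    $ cat /root/mesos-ec2/zoo        # ZooKeeper-backed cluster
    ec2-203-0-113-10.compute-1.amazonaws.com
    ec2-203-0-113-11.compute-1.amazonaws.com

    $ cat /root/mesos-ec2/zoo        # no ZooKeeper requested
    NONE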

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>/vol/persistent-hdfs</value>
+  </property>
+
+  <property>
+    <name>fs.default.name</name>
+    <value>hdfs://{{active_master}}:9010</value>
+  </property>
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>65536</value>
+  </property>
+
+</configuration>
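
After {{active_master}} is substituted at deploy time, the persistent
file system is addressed as hdfs://<master>:9010, distinct from the
ephemeral instance. One way to talk to it explicitly, using Hadoop's
generic -fs option (hostname hypothetical):

    hadoop fs -fs hdfs://ec2-203-0-113-10.compute-1.amazonaws.com:9010 -ls /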

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh Sun Jun  5 08:54:50 2011
@@ -0,0 +1,59 @@
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=/usr/java/default
+
+# Extra Java CLASSPATH elements.  Optional.
+# export HADOOP_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE=1000
+
+# Extra Java runtime options.  Empty by default.
+# export HADOOP_OPTS=-server
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+# export HADOOP_TASKTRACKER_OPTS=
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+# export HADOOP_CLIENT_OPTS
+
+# Extra ssh options.  Empty by default.
+# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+export HADOOP_LOG_DIR=/mnt/persistent-hdfs/logs
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=/var/hadoop/persistent-hdfs/pids
+
+# A string representing this instance of hadoop. $USER by default.
+# export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+# export HADOOP_NICENESS=10
+
+ulimit -n 16000
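
The ulimit raise matters because HDFS daemons keep many block files and
sockets open at once. A sketch to confirm the limit reached a running
daemon (requires a kernel that exposes /proc/<pid>/limits; the DataNode
process match is an assumption):

    ulimit -n                         # limit in the current shell
    grep 'open files' /proc/$(pgrep -f DataNode | head -1)/limits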

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>2</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+  </property>
+
+  <property>
+    <name>dfs.secondary.http.address</name>
+    <value>0.0.0.0:60090</value>
+    <description>
+      The secondary namenode http server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:60010</value>
+    <description>
+      The address on which the datanode server will listen.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:60075</value>
+    <description>
+      The datanode http server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:60020</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  
+  <property>
+    <name>dfs.http.address</name>
+    <value>0.0.0.0:60070</value>
+    <description>
+      The address and the base port on which the dfs namenode web UI will listen.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>25</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>8</value>
+  </property>
+
+</configuration>
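
The 60xxx ports above are what set the persistent HDFS instance apart
from the ephemeral one (which keeps Hadoop's 500xx defaults), and they
match the new security-group rules in mesos_ec2.py below: 60070 on
masters, 60060 and 60075 on slaves. A spot check from outside the
cluster (hostname hypothetical):

    curl --silent --max-time 5 \
      http://ec2-203-0-113-10.compute-1.amazonaws.com:60070/ > /dev/null \
      && echo "persistent-hdfs namenode UI reachable"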

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml Sun Jun  5 08:54:50 2011
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>{{active_master}}:9001</value>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value>4</value>
+    <description>The maximum number of map tasks that will be run
+    simultaneously by a task tracker.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value>2</value>
+    <description>The maximum number of reduce tasks that will be run
+    simultaneously by a task tracker.
+    </description>
+  </property>
+
+</configuration>
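
With these maxima, each tracker runs at most 4 maps and 2 reduces at a
time, so cluster-wide concurrency scales with the slave count. A
back-of-the-envelope check against the deployed slaves file:

    N=$(wc -l < /root/mesos-ec2/slaves)
    echo "max concurrent map tasks:    $((4 * N))"
    echo "max concurrent reduce tasks: $((2 * N))"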

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/masters
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/masters?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/masters (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/masters Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{active_master}}

Added: incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves?rev=1132176&view=auto
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves (added)
+++ incubator/mesos/trunk/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves Sun Jun  5 08:54:50 2011
@@ -0,0 +1 @@
+{{slave_list}}

Modified: incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave?rev=1132176&r1=1132175&r2=1132176&view=diff
==============================================================================
--- incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave (original)
+++ incubator/mesos/trunk/ec2/deploy.lucid64/root/mesos-ec2/setup-slave Sun Jun  5 08:54:50 2011
@@ -74,3 +74,5 @@ fi
 # Remove ~/.ssh/known_hosts because it gets polluted as you start/stop many
 # clusters (new machines tend to come up under old hostnames)
 rm -f /root/.ssh/known_hosts
+
+./approve-master-key

Modified: incubator/mesos/trunk/ec2/mesos_ec2.py
URL: http://svn.apache.org/viewvc/incubator/mesos/trunk/ec2/mesos_ec2.py?rev=1132176&r1=1132175&r2=1132176&view=diff
==============================================================================
--- incubator/mesos/trunk/ec2/mesos_ec2.py (original)
+++ incubator/mesos/trunk/ec2/mesos_ec2.py Sun Jun  5 08:54:50 2011
@@ -134,6 +134,8 @@ def launch_cluster(conn, opts, cluster_n
     master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
     master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
     master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
+    master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
+    master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
   if slave_group.rules == []: # Group was just now created
     slave_group.authorize(src_group=master_group)
     slave_group.authorize(src_group=slave_group)
@@ -142,6 +144,8 @@ def launch_cluster(conn, opts, cluster_n
     slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
     slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
     slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
+    slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
+    slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
   if zoo_group.rules == []: # Group was just now created
     zoo_group.authorize(src_group=master_group)
     zoo_group.authorize(src_group=slave_group)
@@ -276,8 +280,8 @@ def wait_for_cluster(conn, master_res, s
   wait_for_instances(conn, slave_res)
   if zoo_res != None:
     wait_for_instances(conn, zoo_res)
-  print "Waiting 40 more seconds..."
-  time.sleep(40)
+  print "Waiting 60 more seconds..."
+  time.sleep(60)
 
 
 # Get number of local disks available for a given EC2 instance type.
@@ -299,11 +303,11 @@ def deploy_files(conn, root_dir, opts, m
   active_master = master_res.instances[0].public_dns_name
 
   num_disks = get_num_disks(opts.instance_type)
-  hdfs_data_dirs = "/mnt/hdfs/dfs/data"
+  hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
   mapred_local_dirs = "/mnt/hadoop/mrlocal"
   if num_disks > 1:
     for i in range(2, num_disks + 1):
-      hdfs_data_dirs += ",/mnt%d/hdfs/dfs/data" % i
+      hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
       mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
 
   if zoo_res != None:
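
For reference, on an instance type reporting four local disks the loop
above now produces the renamed layout:

    hdfs_data_dirs    = /mnt/ephemeral-hdfs/data,/mnt2/ephemeral-hdfs/data,/mnt3/ephemeral-hdfs/data,/mnt4/ephemeral-hdfs/data
    mapred_local_dirs = /mnt/hadoop/mrlocal,/mnt2/hadoop/mrlocal,/mnt3/hadoop/mrlocal,/mnt4/hadoop/mrlocal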