Posted to commits@mesos.apache.org by ya...@apache.org on 2015/07/07 19:51:16 UTC

[3/5] mesos git commit: Removed obsolete ec2 scripts.

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile b/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile
deleted file mode 100644
index e309971..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/hypertable/Capfile
+++ /dev/null
@@ -1,426 +0,0 @@
-set :source_machine, "{{active_master}}"
-set :install_dir,  "/opt/hypertable"
-set :hypertable_version, "0.9.4.3"
-set :default_dfs, "hadoop"
-set :default_config, "/root/mesos-ec2/hypertable/hypertable.cfg"
-set :default_additional_args, ""
-set :hbase_home, "/opt/hbase/current"
-set :default_client_multiplier, 1
-set :default_test_driver, "hypertable"
-set :default_test_args, ""
-
-role :source, "{{active_master}}"
-role :master, "{{active_master}}"
-role :hyperspace, "{{active_master}}"
-open("/root/mesos-ec2/slaves").each do |slave|
-  role :slave, slave
-end
-role :thriftbroker
-role :spare
-role :localhost, "{{active_master}}"
-role :test_client
-role :test_dispatcher
-
-######################### END OF USER CONFIGURATION ############################
-
-def install_machines
-  (roles[:master].servers | \
-   roles[:hyperspace].servers | \
-   roles[:slave].servers | \
-   roles[:thriftbroker].servers | \
-   roles[:spare].servers | \
-   roles[:test_client].servers | \
-   roles[:test_dispatcher].servers) - roles[:source].servers
-end
-
-role(:install) { install_machines }
-
-set(:dfs) do
-  "#{default_dfs}"
-end unless exists?(:dfs)
-
-set(:config) do
-  "#{default_config}"
-end unless exists?(:config)
-
-set(:additional_args) do
-  "#{default_additional_args}"
-end unless exists?(:additional_args)
-
-set(:test_driver) do
-  "#{default_test_driver}"
-end unless exists?(:test_driver)
-
-set(:test_args) do
-  "#{default_test_args}"
-end unless exists?(:test_args)
-
-set(:client_multiplier) do
-  "#{default_client_multiplier}".to_i
-end unless exists?(:client_multiplier)
-
-set :config_file, "#{config}".split('/')[-1]
-set :config_option, \
-    "--config=#{install_dir}/#{hypertable_version}/conf/#{config_file}"
-
- desc <<-DESC
-    Copies config file to installation on localhost.
-    This task runs on localhost and copies the config file specified \
-    by the variable 'config' (default=#{config}) \
-    to the installation directory specified by the variable 'install_dir' \
-    (default=#{install_dir})
- DESC
-task :copy_config, :roles => :localhost do
-  run("rsync -e \"ssh -o StrictHostKeyChecking=no\" #{config} #{install_dir}/#{hypertable_version}/conf/")
-end
-
- desc <<-DESC
-    Rsyncs the installation directory to the cluster.  For each machine in the \
-    cluster, this command rsyncs the installation from the source \
-    installation machine specified by the variable 'source_machine' \
-    (default=#{source_machine})
- DESC
-task :rsync, :roles => :install do
-  run <<-CMD
-     rsync -av -e "ssh -o StrictHostKeyChecking=no" --exclude=log --exclude=run --exclude=demo --exclude=fs --exclude=hyperspace/ #{source_machine}:#{install_dir}/#{hypertable_version} #{install_dir} &&
-     rsync -av -e "ssh -o StrictHostKeyChecking=no" --exclude=log --exclude=run --exclude=demo --exclude=fs --exclude=hyperspace/ #{source_machine}:#{install_dir}/#{hypertable_version}/conf/ #{install_dir}/#{hypertable_version}/conf
-  CMD
-end
-
- desc <<-DESC
-    sets up the symbolic link 'current' in the installation area \
-    to point to the directory of the current version
-    (default=#{hypertable_version})
- DESC
-task :set_current, :roles => [:install, :source] do
-  run <<-CMD
-   cd #{install_dir} &&
-   rm -f current &&
-   ln -s #{hypertable_version} current
-  CMD
-end
-
- desc <<-DESC
-    Distributes installation.  This task copies the config file and \
-    then rsyncs the installation to each machine in the cluster
- DESC
-task :dist do
-  transaction do
-    copy_config
-    rsync
-  end
-end
-
- desc <<-DESC
-    Distributes and fhsizes the installation: rsyncs, runs
-    fhsize.sh, then copies the config and rsyncs again
- DESC
-task :fhsize do
-  transaction do
-    rsync
-    fhsize_install
-    copy_config
-    rsync
-  end
-end
-
-
- desc <<-DESC
-    Runs fhsize.sh on the installations
- DESC
-task :fhsize_install, :roles => [:install, :source] do
-  run <<-CMD
-     #{install_dir}/#{hypertable_version}/bin/fhsize.sh
-  CMD
-end
-
-desc "Verify that upgrade is OK."
-task :qualify_upgrade, :roles => :source do
-  run <<-CMD
-     #{install_dir}/#{hypertable_version}/bin/upgrade-ok.sh #{install_dir}/current #{hypertable_version}
-  CMD
-end
-
- desc <<-DESC
-    Upgrades installation.  Stops servers, copies config, rsyncs
-    the installation, then copies hyperspace and the rangeserver
-    state in the run/ directory to the new installation
- DESC
-task :upgrade do
-  transaction do
-    qualify_upgrade
-    stop
-    copy_config
-    rsync
-    upgrade_hyperspace
-    upgrade_rangeservers
-    set_current
-  end
-end
-
- desc <<-DESC
-    Upgrades (copies) the Hyperspace database from the current
-    installation to the new installation specified by the
-    hypertable_version (#{hypertable_version})
- DESC
-task :upgrade_hyperspace, :roles => :hyperspace do
-  run <<-CMD
-    cp -dpR #{install_dir}/current/hyperspace \
-       #{install_dir}/#{hypertable_version}
-  CMD
-end
-
- desc <<-DESC
-    Upgrades (copies) the RangeServers by copying the contents
-    of the run directory from the current installation to
-    installation specified by the hypertable_version
-    (#{hypertable_version})
- DESC
-task :upgrade_rangeservers, :roles => :slave do
-  run <<-CMD
-    cp -dpR #{install_dir}/current/run \
-       #{install_dir}/#{hypertable_version}
-  CMD
-end
-
-desc "Starts all processes."
-task :start do
-  transaction do
-    start_hyperspace
-    start_master
-    start_slaves
-    start_master_thriftbroker
-  end
-end
-
-desc "Starts hyperspace processes."
-task :start_hyperspace, :roles => :hyperspace do
-  run <<-CMD
-   #{install_dir}/current/bin/start-hyperspace.sh \
-      #{config_option}
-  CMD
-end
-
-desc "Starts master processes."
-task :start_master, :roles => :master do
-  run <<-CMD
-   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
-      #{config_option} &&
-   #{install_dir}/current/bin/start-master.sh #{config_option} &&
-   #{install_dir}/current/bin/start-monitoring.sh
-  CMD
-end
-
-desc "Starts ThriftBroker on master."
-task :start_master_thriftbroker, :roles => :master do
-  run <<-CMD
-   #{install_dir}/current/bin/start-thriftbroker.sh \
-      #{config_option}
-  CMD
-end
-
-desc "Starts slave processes."
-task :start_slaves, :roles => :slave do
-  run <<-CMD
-   #{install_dir}/current/bin/random-wait.sh 5 &&
-   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
-      #{config_option} &&
-   #{install_dir}/current/bin/start-rangeserver.sh \
-      #{config_option} &&
-   #{install_dir}/current/bin/start-thriftbroker.sh \
-      #{config_option}
-  CMD
-end
-
-desc "Starts ThriftBroker processes."
-task :start_thriftbrokers, :roles => :thriftbroker do
-  run <<-CMD
-   #{install_dir}/current/bin/random-wait.sh 5 &&
-   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
-      #{config_option} &&
-   #{install_dir}/current/bin/start-thriftbroker.sh \
-      #{config_option}
-  CMD
-end
-
-
-desc "Starts DFS brokers."
-task :start_dfsbrokers, :roles => [:master, :slave] do
-  run "#{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
-      #{config_option}"
-end
-
-desc "Stops all servers."
-task :stop do
-  transaction do
-    stop_slaves
-    stop_master
-    stop_hyperspace
-  end
-end
-
-desc "Stops slave processes."
-task :stop_slaves, :roles => :slave do
-  run <<-CMD
-  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master #{additional_args}
-  CMD
-end
-
-desc "Stops master processes."
-task :stop_master, :roles => :master do
-  run <<-CMD
-  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-rangeserver #{additional_args} &&
-  #{install_dir}/current/bin/stop-monitoring.sh
-  CMD
-end
-
-desc "Stops hyperspace processes."
-task :stop_hyperspace, :roles => :hyperspace do
-  run <<-CMD 
-  #{install_dir}/current/bin/stop-hyperspace.sh
-  CMD
-end
-
-desc "Stops ThriftBroker processes."
-task :stop_thriftbrokers, :roles => :thriftbroker do
-  run <<-CMD 
-  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master --no-rangeserver
-  CMD
-end
-
-desc "Cleans hyperspace & rangeservers, removing all tables."
-task :cleandb do
-  transaction do
-    clean_master
-    clean_hyperspace
-    clean_slaves
-  end  
-end
-
-desc "Cleans master state but not hyperspace."
-task :clean_master, :roles => :master do
-  run <<-CMD
-   #{install_dir}/current/bin/start-dfsbroker.sh #{dfs} \
-      #{config_option} && \
-   #{install_dir}/current/bin/clean-database.sh #{config_option} ;
-  CMD
-end
-
-desc "Cleans hyperspace."
-task :clean_hyperspace, :roles => :hyperspace do
-  run <<-CMD
-   #{install_dir}/current/bin/clean-hyperspace.sh
-  CMD
-end
-
-desc "Cleans rangeservers and master state but not hyperspace."
-task :clean_slaves, :roles => :slave do
-  run <<-CMD
-   #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master &&
-   rm -rf #{install_dir}/current/run/*
-  CMD
-end
-
-desc "Reports status for all processes."
-task :status do
-  transaction do
-    dfs_status
-    master_status
-    hyperspace_status
-    rangeserver_status
-  end
-end
-
-desc "Get status for dfs processes."
-task :dfs_status, :roles => [:master, :slave] do
-  run <<-CMD
-   #{install_dir}/current/bin/ht serverup dfsbroker
-  CMD
-end
-
-desc "Get status for Hypertable.Master process."
-task :master_status, :roles => [:master] do
-  run <<-CMD
-   #{install_dir}/current/bin/ht serverup master
-  CMD
-end
-
-desc "Get status for Hyperspace.Master process."
-task :hyperspace_status, :roles => [:hyperspace] do
-  run <<-CMD
-   #{install_dir}/current/bin/ht serverup hyperspace
-  CMD
-end
-
-desc "Get status for rangeserver processes."
-task :rangeserver_status, :roles => [:slave] do
-  run <<-CMD
-   #{install_dir}/current/bin/ht serverup rangeserver
-  CMD
-end
-
-set :default_dumpfile, "/tmp/rsdump.txt"
-
-set(:dumpfile) do
-  "#{default_dumpfile}"
-end unless exists?(:dumpfile)
-
-desc "Run dump command on each rangeserver"
-task :rangeserver_dump, :roles => [:slave] do
-  run <<-CMD
-   echo "dump NOKEYS '#{dumpfile}';" | #{install_dir}/current/bin/ht ht_rsclient --batch #{config_option}
-  CMD
-end
-
-
-if "#{test_driver}" == "hypertable"
-  set :thrift_broker_command, "#{install_dir}/current/bin/start-thriftbroker.sh #{config_option}"
-  set :start_test_client_command, "#{install_dir}/current/bin/start-test-client.sh --count #{client_multiplier} #{roles[:test_dispatcher].servers[0]}"
-  set :run_test_dispatcher_command, "#{install_dir}/current/bin/jrun --pidfile #{install_dir}/#{hypertable_version}/run/Hypertable.TestDispatcher.pid org.hypertable.examples.PerformanceTest.Dispatcher --driver=#{test_driver} --clients=#{roles[:test_client].servers.length*client_multiplier} #{test_args}"
-  set :stop_test_args, ""
-elsif "#{test_driver}" == "hbase"
-  set :thrift_broker_command, "true"
-  set :start_test_client_command, "#{install_dir}/current/bin/start-test-client.sh --jrun-opts \"--add-to-classpath #{hbase_home}/conf\" --count #{client_multiplier} #{roles[:test_dispatcher].servers[0]}"
-  set :run_test_dispatcher_command, "#{install_dir}/current/bin/jrun --pidfile #{install_dir}/#{hypertable_version}/run/Hypertable.TestDispatcher.pid --add-to-classpath #{hbase_home}/conf org.hypertable.examples.PerformanceTest.Dispatcher --driver=#{test_driver} --clients=#{roles[:test_client].servers.length*client_multiplier} #{test_args}"
-  set :stop_test_args, "--no-thriftbroker --no-dfsbroker"
-else
-  set :thrift_broker_command, "echo Invalid test driver - #{test_driver}"
-  set :start_test_client_command, "echo Invalid test driver - #{test_driver}"
-  set :run_test_dispatcher_command, "echo Invalid test driver - #{test_driver}"
-  set :stop_test_args, "--no-thriftbroker --no-dfsbroker"
-end
-
-desc "Starts test clients."
-task :start_test_clients, :roles => :test_client do
-  run <<-CMD
-   #{install_dir}/current/bin/random-wait.sh 5 &&
-   #{thrift_broker_command} &&
-   #{start_test_client_command}
-  CMD
-end
-
-desc "Run test dispatcher."
-task :run_test_dispatcher, :roles => :test_dispatcher do
-  run <<-CMD
-   #{thrift_broker_command} &&
-   #{run_test_dispatcher_command}
-  CMD
-end
-
-desc "Stops test."
-task :stop_test, :roles => [:test_client, :test_dispatcher] do
-  run <<-CMD
-  #{install_dir}/current/bin/stop-servers.sh --no-hyperspace --no-master --no-rangeserver #{stop_test_args}
-  CMD
-end
-
-desc "Run test"
-task :run_test do
-  transaction do
-    stop_test
-    start_test_clients
-    run_test_dispatcher
-  end
-end
-
-
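
For reference, these Capistrano tasks are driven by the wrapper scripts
later in this commit: start-hypertable runs "cap dist && cap start" and
stop-hypertable runs "cap stop". A minimal invocation sketch, assuming
Capistrano 2.x with this Capfile in the current directory (the -S
variable-override syntax is standard Capistrano 2):

    # Distribute the installation to every node, then start all services.
    cd /root/mesos-ec2/hypertable
    cap dist && cap start

    # Variables declared above with set(:x) ... unless exists?(:x) can be
    # overridden per run, e.g. to use the local DFS broker instead of Hadoop:
    cap -S dfs=local start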

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg b/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg
deleted file mode 100644
index 934897a..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/hypertable/hypertable.cfg
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# hypertable.cfg
-#
-
-# HDFS Broker
-HdfsBroker.Port=38030
-HdfsBroker.fs.default.name=hdfs://{{active_master}}:9010
-HdfsBroker.Workers=20
-
-# Ceph Broker
-CephBroker.Port=38030
-CephBroker.Workers=20
-CephBroker.MonAddr=10.0.1.245:6789
-
-# Local Broker
-DfsBroker.Local.Port=38030
-DfsBroker.Local.Root=fs/local
-
-# DFS Broker - for clients
-DfsBroker.Host=localhost
-DfsBroker.Port=38030
-
-# Hyperspace
-Hyperspace.Replica.Host={{active_master}}
-Hyperspace.Replica.Port=38040
-Hyperspace.Replica.Dir=hyperspace
-Hyperspace.Replica.Workers=20
-
-# Hypertable.Master
-Hypertable.Master.Host={{active_master}}
-Hypertable.Master.Port=38050
-Hypertable.Master.Workers=20
-
-
-# Hypertable.RangeServer
-Hypertable.RangeServer.Port=38060
-
-Hyperspace.KeepAlive.Interval=30000
-Hyperspace.Lease.Interval=1000000
-Hyperspace.GracePeriod=200000
-
-# ThriftBroker
-ThriftBroker.Port=38080
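
The {{active_master}} placeholders above are template variables; per the
comment in ec2-variables.sh later in this commit, they are filled in by
the mesos-ec2 launch script before the files reach the cluster. A
hypothetical substitution sketch (the real mechanism lives in the
launcher, outside this diff; the hostname is illustrative):

    ACTIVE_MASTER=ec2-203-0-113-10.compute-1.amazonaws.com
    sed "s/{{active_master}}/$ACTIVE_MASTER/g" \
        hypertable.cfg.template > hypertable.cfg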

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/masters
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/masters b/ec2/deploy.centos64/root/mesos-ec2/masters
deleted file mode 100644
index c531652..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/masters
+++ /dev/null
@@ -1 +0,0 @@
-{{master_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon b/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon
deleted file mode 100755
index 177265e..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/mesos-daemon
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Set up MESOS_HOME in order to find projd
-export MESOS_HOME=/root/mesos
-
-# Set MESOS_PUBLIC_DNS so slaves can be linked in master web UI
-export MESOS_PUBLIC_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/public-hostname`
-
-# Set PATH to include Scala
-export PATH=$PATH:/root/scala-2.8.0.final/bin
-
-# Set HADOOP_HOME variable to allow slaves to get executors from HDFS
-export HADOOP_HOME=/root/hadoop-0.20.2
-
-ulimit -n 8192
-
-PROGRAM=$1
-shift
-
-EXTRA_OPTS=""
-if [ "$PROGRAM" == "mesos-slave" ]; then
-  # Compute CPU resources (if not specified).
-  if [[ "$*" != *--cpus* ]]; then
-    CPUS=`grep processor /proc/cpuinfo | wc -l`
-    EXTRA_OPTS="$EXTRA_OPTS --cpus=$CPUS"
-  fi
-
-  # Compute memory resources (if not specified).
-  if [[ "$*" != *--mem* ]]; then
-    MEM_KB=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
-    MEM=$[(MEM_KB - 1024 * 1024) / 1024]
-    EXTRA_OPTS="$EXTRA_OPTS --mem=$MEM"
-  fi
-fi
-
-cd $MESOS_HOME/bin
-nohup ./$PROGRAM --log_dir=/mnt/mesos-logs --work_dir=/mnt/mesos-work $EXTRA_OPTS $@ </dev/null >/mnt/mesos-logs/$PROGRAM.out 2>&1 &
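
When launching a slave, the script above derives default resources from
the machine: the CPU count from /proc/cpuinfo, and memory as total RAM
minus 1 GB reserved for the OS. A worked example of the memory
arithmetic, assuming an instance with 7.5 GB of RAM (MemTotal of
7864320 kB):

    # (7864320 - 1024*1024) / 1024 = 6656 MB offered to Mesos.
    MEM_KB=7864320
    MEM=$(( (MEM_KB - 1024 * 1024) / 1024 ))
    echo "--mem=$MEM"   # prints --mem=6656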

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos b/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos
deleted file mode 100755
index 941d783..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/redeploy-mesos
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-NUM_MASTERS=`cat masters | wc -l`
-SLAVES=`cat slaves`
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-if [[ $NUM_MASTERS -gt 1 ]]; then
-  echo "RSYNC'ing /root/mesos to masters..."
-  for master in $MASTERS; do
-    echo $master
-    rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $master:/root & sleep 0.3
-  done
-  wait
-fi
-
-echo "RSYNC'ing /root/mesos to slaves..."
-for slave in $SLAVES; do
-  echo $slave
-  rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $slave:/root & sleep 0.3
-done
-wait
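
This script is invoked at the end of the setup script below, but it can
also be run by hand after rebuilding on the master. A sketch of that
manual flow, using only paths that appear in this commit:

    # Rebuild Mesos on the master, then push the tree to all nodes.
    cd /root/mesos && make
    cd /root/mesos-ec2 && ./redeploy-mesos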

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/setup
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/setup b/ec2/deploy.centos64/root/mesos-ec2/setup
deleted file mode 100755
index f06ed21..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/setup
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/bin/bash
-
-# Make sure we are in the mesos-ec2 directory
-cd /root/mesos-ec2
-
-# Set hostname based on EC2 private DNS name, so that it is set correctly
-# even if the instance is restarted with a different private DNS name
-PRIVATE_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/local-hostname`
-hostname $PRIVATE_DNS
-echo $PRIVATE_DNS > /etc/hostname
-export HOSTNAME=$PRIVATE_DNS  # Fix the bash built-in hostname variable too
-
-echo "Setting up Mesos master on `hostname`..."
-
-# Read command-line arguments
-OS_NAME=$1
-DOWNLOAD_METHOD=$2
-BRANCH=$3
-SWAP_MB=$4
-
-MASTERS_FILE="masters"
-MASTERS=`cat $MASTERS_FILE`
-NUM_MASTERS=`cat $MASTERS_FILE | wc -l`
-OTHER_MASTERS=`cat $MASTERS_FILE | sed '1d'`
-SLAVES=`cat slaves`
-ZOOS=`cat zoo`
-
-if [[ $ZOOS = *NONE* ]]; then
-  NUM_ZOOS=0
-  ZOOS=""
-else
-  NUM_ZOOS=`cat zoo | wc -l`
-fi
-
-# Scripts that get used for/while running Mesos.
-SCRIPTS="copy-dir
-         create-swap
-         mesos-daemon
-         redeploy-mesos
-         setup-slave
-         ssh-no-keychecking
-         start-hypertable
-         start-mesos
-         stop-hypertable
-         stop-mesos"
-
-EPHEMERAL_HDFS=/root/ephemeral-hdfs
-PERSISTENT_HDFS=/root/persistent-hdfs
-
-#TODO(*): update config scripts to have conditionals for handling different
-#         platforms
-JAVA_HOME=/usr/java/default
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-if [[ `tty` == "not a tty" ]] ; then
-    echo "Expecting a tty or pty! (use the ssh -t option)."
-    exit 1
-fi
-
-echo "Setting executable permissions on scripts..."
-for s in $SCRIPTS; do chmod u+x $s; done
-
-echo "Running setup-slave on master to mount filesystems, etc..."
-./setup-slave $SWAP_MB
-
-echo "SSH'ing to master machine(s) to approve key(s)..."
-for master in $MASTERS; do
-  echo $master
-  ssh $SSH_OPTS $master true &
-  sleep 0.3
-done
-ssh $SSH_OPTS localhost true &
-ssh $SSH_OPTS `hostname` true &
-wait
-
-if [[ $NUM_ZOOS != 0 ]] ; then
-  echo "SSH'ing to ZooKeeper server(s) to approve keys..."
-  zid=1
-  for zoo in $ZOOS; do
-    echo $zoo
-    ssh $SSH_OPTS $zoo true \; mkdir -p /tmp/zookeeper \; echo $zid \> /tmp/zookeeper/myid &
-    zid=$(($zid+1))
-    sleep 0.3
-  done
-fi
-
-# Try to SSH to each cluster node to approve their key. Since some nodes may
-# be slow in starting, we retry failed slaves up to 3 times.
-TODO="$SLAVES $ZOO $OTHER_MASTERS" # List of nodes to try (initially all)
-TRIES="0"                          # Number of times we've tried so far
-echo "SSH'ing to other cluster nodes to approve keys..."
-while [ "e$TODO" != "e" ] && [ $TRIES -lt 4 ] ; do
-  NEW_TODO=
-  for slave in $TODO; do
-    echo $slave
-    ssh $SSH_OPTS $slave true
-    if [ $? != 0 ] ; then
-        NEW_TODO="$NEW_TODO $slave"
-    fi
-  done
-  TRIES=$[$TRIES + 1]
-  if [ "e$NEW_TODO" != "e" ] && [ $TRIES -lt 4 ] ; then
-      sleep 15
-      TODO="$NEW_TODO"
-      echo "Re-attempting SSH to cluster nodes to approve keys..."
-  else
-      break;
-  fi
-done
-
-echo "RSYNC'ing /root/mesos-ec2 to other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  rsync -e "ssh $SSH_OPTS" -az /root/mesos-ec2 $node:/root &
-  scp $SSH_OPTS ~/.ssh/id_rsa $node:.ssh &
-  sleep 0.3
-done
-wait
-
-echo "Running slave setup script on other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  ssh -t $SSH_OPTS root@$node "mesos-ec2/setup-slave $SWAP_MB" & sleep 0.3
-done
-wait
-
-echo "RSYNC'ing HDFS config files to other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  rsync -e "ssh $SSH_OPTS" -az $EPHEMERAL_HDFS/conf $node:$EPHEMERAL_HDFS &
-  rsync -e "ssh $SSH_OPTS" -az $PERSISTENT_HDFS/conf $node:$PERSISTENT_HDFS &
-  sleep 0.3
-done
-wait
-
-DOWNLOADED=0
-
-if [[ "$DOWNLOAD_METHOD" == "git" ]] ; then
-  # change git's ssh command so it does not ask to accept host keys
-  export GIT_SSH=/root/mesos-ec2/ssh-no-keychecking
-  REPOSITORY=git://github.com/mesos/mesos.git
-  echo "Checking out Mesos from $REPOSITORY"
-  pushd /root > /dev/null 2>&1
-  rm -rf mesos mesos.tgz
-  # Set git SSH command to a script that uses -o StrictHostKeyChecking=no
-  git clone $REPOSITORY mesos
-  pushd mesos 2>&1
-  git checkout -b $BRANCH --track origin/$BRANCH
-  popd > /dev/null 2>&1
-  popd > /dev/null 2>&1
-  DOWNLOADED=1
-fi
-
-# Build Mesos if we downloaded it
-if [[ "$DOWNLOADED" == "1" ]] ; then
-  echo "Building Mesos..."
-  pushd /root/mesos > /dev/null 2>&1
-  ./configure.ubuntu-lucid-64
-  make clean
-  make
-  popd > /dev/null 2>&1
-  if [ -d /root/spark ] ; then
-    echo "Building Spark..."
-    pushd /root/spark > /dev/null 2>&1
-    MESOS_HOME=/root/mesos make all native
-    popd > /dev/null 2>&1
-  fi
-  echo "Building Hadoop framework..."
-  pushd /root/mesos/frameworks/hadoop-0.20.2 > /dev/null 2>&1
-  ant
-  ant examples
-  popd > /dev/null 2>&1
-fi
-
-echo "Setting up Hadoop framework config files..."
-cp hadoop-framework-conf/* /root/mesos/frameworks/hadoop-0.20.2/conf
-
-echo "Setting up haproxy+apache framework config files..."
-cp haproxy+apache/* /root/mesos/frameworks/haproxy+apache
-
-echo "Setting up Spark config files..."
-# TODO: This currently overwrites whatever the user wrote there; on
-# the other hand, we also don't want to leave an old file created by
-# us because it would have the wrong hostname for HDFS etc
-mkdir -p /root/spark/conf
-echo "-Dspark.dfs=hdfs://$HOSTNAME:9000" \
-     > /root/spark/conf/java-opts
-
-echo "Redeploying /root/mesos..."
-./redeploy-mesos
-
-echo "Setting up NFS..."
-if [ ! -e /nfs ] ; then
-  mkdir -p /mnt/nfs
-  rm -fr /nfs
-  ln -s /mnt/nfs /nfs
-fi
-if ! grep -e '^/nfs ' /etc/exports; then
-  echo "/nfs    10.0.0.0/8(ro,async,no_subtree_check)" >> /etc/exports
-fi
-/sbin/service portmap start
-/sbin/service nfs start
-# Unexport and re-export everything in /etc/exports because, if we are
-# restarting a stopped EC2 instance, we might have had an entry for /nfs in
-# /etc/exports before we created /mnt/nfs.
-exportfs -ua
-exportfs -a
-
-echo "Mounting NFS on slaves..."
-for slave in $SLAVES; do
-  echo $slave
-  ssh -t $SSH_OPTS root@$slave "mkdir -p /nfs; service portmap start; service nfs start; mount $HOSTNAME:/nfs /nfs" & sleep 0.3
-done
-wait
-
-echo "Formatting ephemeral HDFS namenode..."
-$EPHEMERAL_HDFS/bin/hadoop namenode -format
-
-echo "Starting ephemeral HDFS..."
-$EPHEMERAL_HDFS/bin/start-dfs.sh
-
-if [[ ! -e /vol/persistent-hdfs/dfs/name ]] ; then
-  echo "Formatting persistent HDFS namenode..."
-  $PERSISTENT_HDFS/bin/hadoop namenode -format
-fi
-
-echo "Starting persistent HDFS..."
-$PERSISTENT_HDFS/bin/start-dfs.sh
-
-sleep 1
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  echo "Starting ZooKeeper quorum..."
-  for zoo in $ZOOS; do
-    ssh $SSH_OPTS $zoo "/root/mesos/third_party/zookeeper-*/bin/zkServer.sh start </dev/null >/dev/null" & sleep 0.1
-  done
-  wait
-  sleep 5
-fi
-
-echo "Stopping any existing Mesos cluster..."
-./stop-mesos
-sleep 2
-
-echo "Starting Mesos cluster..."
-./start-mesos
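
For context, this script expects four positional arguments (OS_NAME,
DOWNLOAD_METHOD, BRANCH, and SWAP_MB, per the reads near the top) and a
tty. A hypothetical invocation from the launcher side; the argument
values are illustrative:

    # ssh -t satisfies the tty check; 1024 is the swap size in MB.
    ssh -t root@$MASTER "mesos-ec2/setup centos64 git master 1024"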

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/setup-slave
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/setup-slave b/ec2/deploy.centos64/root/mesos-ec2/setup-slave
deleted file mode 100755
index bbb8d34..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/setup-slave
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/bash
-
-# Make sure we are in the mesos-ec2 directory
-cd /root/mesos-ec2
-
-# Set hostname based on EC2 private DNS name, so that it is set correctly
-# even if the instance is restarted with a different private DNS name
-PRIVATE_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/local-hostname`
-hostname $PRIVATE_DNS
-echo $PRIVATE_DNS > /etc/hostname
-HOSTNAME=$PRIVATE_DNS  # Fix the bash built-in hostname variable too
-
-echo "Setting up Mesos slave on `hostname`..."
-
-# Read command-line arguments
-SWAP_MB=$1
-
-# Mount options to use for ext3 and xfs disks (the ephemeral disks
-# are ext3, but we use xfs for EBS volumes to format them faster)
-EXT3_MOUNT_OPTS="defaults,noatime,nodiratime"
-XFS_MOUNT_OPTS="defaults,noatime,nodiratime,allocsize=8m"
-
-# Create Hadoop and HDFS directories in a given parent directory
-# (for example /mnt, /mnt2, and so on)
-function create_hadoop_dirs {
-  location=$1
-  mkdir -p $location/ephemeral-hdfs/data $location/hadoop/tmp
-  mkdir -p $location/hadoop/mrlocal $location/hadoop/mrlocal2
-}
-
-# Set up Hadoop and Mesos directories in /mnt
-create_hadoop_dirs /mnt
-mkdir -p /mnt/ephemeral-hdfs/logs
-mkdir -p /mnt/persistent-hdfs/logs
-mkdir -p /mnt/hadoop-logs
-mkdir -p /mnt/mesos-logs
-mkdir -p /mnt/mesos-work
-
-# Mount any ephemeral volumes we might have beyond /mnt
-function setup_extra_volume {
-  device=$1
-  mount_point=$2
-  if [[ -e $device && ! -e $mount_point ]]; then
-    mkdir -p $mount_point
-    mount -o $EXT3_MOUNT_OPTS $device $mount_point
-    echo "$device $mount_point auto $EXT3_MOUNT_OPTS 0 0" >> /etc/fstab
-  fi
-  if [[ -e $mount_point ]]; then
-    create_hadoop_dirs $mount_point
-  fi
-}
-setup_extra_volume /dev/sdc /mnt2
-setup_extra_volume /dev/sdd /mnt3
-setup_extra_volume /dev/sde /mnt4
-
-# Mount cgroup file system
-if [[ ! -e /cgroup ]]; then
-  mkdir -p /cgroup
-  mount -t cgroup none /cgroup
-  echo "none /cgroup cgroup defaults 0 0" >> /etc/fstab
-fi
-
-# Format and mount EBS volume (/dev/sdv) as /vol if the device exists
-# and we have not already created /vol
-if [[ -e /dev/sdv && ! -e /vol ]]; then
-  mkdir /vol
-  if mkfs.xfs -q /dev/sdv; then
-    mount -o $XFS_MOUNT_OPTS /dev/sdv /vol
-    echo "/dev/sdv /vol xfs $XFS_MOUNT_OPTS 0 0" >> /etc/fstab
-  else
-    # mkfs.xfs is not installed on this machine or has failed;
-    # delete /vol so that the user doesn't think we successfully
-    # mounted the EBS volume
-    rmdir /vol
-  fi
-fi
-
-# Remove ~/.ssh/known_hosts because it gets polluted as you start/stop many
-# clusters (new machines tend to come up under old hostnames)
-rm -f /root/.ssh/known_hosts
-
-# Create swap space on /mnt
-/root/mesos-ec2/create-swap $SWAP_MB
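
setup-slave takes the swap size in MB as its only argument and is run on
every node by the setup script above. A minimal standalone invocation
(the value is illustrative):

    # Mount extra disks, create Hadoop dirs, and set up 1 GB of swap.
    /root/mesos-ec2/setup-slave 1024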

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/setup-torque
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/setup-torque b/ec2/deploy.centos64/root/mesos-ec2/setup-torque
deleted file mode 100755
index 2ac8fd3..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/setup-torque
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-cd /root/mesos-ec2
-
-MASTERS=`cat master`
-SLAVES_FILE="slaves"
-SLAVES=`cat $SLAVES_FILE`
-
-SCHEDULER_ITERATION=5
-
-#These seem to be broken, i.e. missing directories after install
-#ssh $MASTERS "apt-get install -y torque-server"
-#ssh $MASTERS "apt-get install -y torque-scheduler"
-#ssh $MASTERS "apt-get install -y torque-client"
-
-#install torque: download/unzip torque
-function installtorque {
-	pushd ~
-	echo "downloading and installing torque on master"
-	#wget http://www.clusterresources.com/downloads/torque/torque-2.4.7.tar.gz
-	rm -rf torque-2.4.7.tar.gz
-	wget http://mesos.berkeley.edu/torque-2.4.7.tar.gz
-	tar xzf torque-2.4.7.tar.gz
-	pushd torque-2.4.7
-	./configure --prefix=/usr
-	make -j8
-	make install
-	popd;popd
-}
-
-function setuptorque {
-	pushd ~/torque-2.4.7
-	echo "running ldconfig on master"
-	ldconfig
-        #./torque.setup root # Note: sets some defaults for batch queue
-	qterm
-        yes|./torque.setup root localhost # Note: sets some defaults for batch queue
-
-	#WARNING: allow root to qsub for debug purposes only, may be dangerous
-	qmgr -c 'set server acl_roots+=root@*' #allow root to submit jobs
-	qmgr -c "set server scheduler_iteration=$SCHEDULER_ITERATION"
-	#qmgr -c 's s allow_node_submit=true' #other hosts can submit too
-
-	NUM_SLAVES=`cat ~/mesos-ec2/slaves|wc -l`
-	#the server must be restarted after this
-	qmgr -c "set queue batch resources_available.nodect=$NUM_SLAVES"
-	#qmgr -c "set server resources_available.nodect=$NUM_SLAVES"
-	qterm
-        pbs_server
-
-	touch ~/.rhosts
-	echo `hostname` |cat >> ~/.rhosts
-	echo `hostname -f` |cat >> ~/.rhosts
-	echo localhost |cat >> ~/.rhosts
-
-	popd
-}
-
-
-function installslaves {
-	pushd ~/torque-2.4.7
-	echo "building packages for slave"
-	make packages
-	#install torque-mom on slave nodes
-	apt-get install -y dsh
-	
-        echo "copying slave install packages to nfs"
-	mkdir /nfs/torque
-	cp torque-package-mom-linux-x86_64.sh /nfs/torque/torque-package-mom-linux-x86_64.sh
-	cp torque-package-clients-linux-x86_64.sh /nfs/torque/torque-package-clients-linux-x86_64.sh
-
-	echo "installing torque mom and clients package on slaves"
-	for i in `cat $SLAVES_FILE`; do ssh $i /nfs/torque/torque-package-mom-linux-x86_64.sh --install; ldconfig; done
-	for i in `cat $SLAVES_FILE`; do ssh $i /nfs/torque/torque-package-clients-linux-x86_64.sh --install; ldconfig; done
-
-	echo "Running ldconfig on slaves"
-	dsh -f $SLAVES_FILE ldconfig
-	popd
-}
-
-function installmpi {
-        #setup mpich2 on all of the cluster nodes
-        ./setup-mpi
-
-        #setup prologue script
-        cp ./prologue.setup-mpi-master /var/spool/torque/mom_priv/prologue
-        cp ./epilogue.kill-mpi-ring /var/spool/torque/mom_priv/epilogue
-        
-	for i in `cat $SLAVES_FILE`; do scp ./prologue.setup-mpi-master $i:/var/spool/torque/mom_priv/prologue; done
-	for i in `cat $SLAVES_FILE`; do scp ./epilogue.kill-mpi-ring $i:/var/spool/torque/mom_priv/epilogue; done
-}
-
-function installmaui {
-	pushd ~
-	#http://www.clusterresources.com/download/maui/maui-3.3.tar.gz
-	rm -rf mesos-maui-3.3.tar
-	wget http://mesos.berkeley.edu/mesos-maui-3.3.tar
-	tar -xf mesos-maui-3.3.tar
-	pushd maui-3.3
-	./configure
-	make
-	make install
-	/usr/local/maui/sbin/maui
-	popd;popd
-}
-
-installtorque
-setuptorque
-installslaves
-installmpi
-installmaui
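
After these functions run, a quick smoke test of the Torque install is
possible with the standard client tools (assumed to be on PATH, as
installed above):

    # Dump the server settings applied by torque.setup/qmgr, then list
    # the slave MOMs that have registered with pbs_server.
    qmgr -c 'print server'
    pbsnodes -a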

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/slaves
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/slaves b/ec2/deploy.centos64/root/mesos-ec2/slaves
deleted file mode 100644
index 05f969e..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/slaves
+++ /dev/null
@@ -1 +0,0 @@
-{{slave_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking b/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking
deleted file mode 100755
index 3daf46f..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/ssh-no-keychecking
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-# Utility script that exec's SSH without key checking so that we can check
-# out code from GitHub without prompting the user.
-
-exec ssh -o StrictHostKeyChecking=no $@
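
This wrapper exists for git's GIT_SSH hook: the setup script above
exports it before cloning so the checkout never blocks on a host-key
prompt. The relevant two lines, as they appear in that script:

    export GIT_SSH=/root/mesos-ec2/ssh-no-keychecking
    git clone git://github.com/mesos/mesos.git mesos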

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/start-hypertable
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/start-hypertable b/ec2/deploy.centos64/root/mesos-ec2/start-hypertable
deleted file mode 100755
index 0e2593f..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/start-hypertable
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-cd /opt/hypertable/current
-
-
-if [ ! -h run ]; then
-    rm -rf run
-    mkdir -p /vol/hypertable/run
-    ln -s /vol/hypertable/run
-fi
-
-if [ ! -h log ]; then
-    rm -rf log
-    mkdir -p /vol/hypertable/log
-    ln -s /vol/hypertable/log
-fi
-
-if [ ! -h hyperspace ]; then
-    rm -rf hyperspace
-    mkdir -p /vol/hypertable/hyperspace
-    ln -s /vol/hypertable/hyperspace
-fi
-
-cd /root/mesos-ec2/hypertable
-cap dist && cap start
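
The three stanzas above repeat one idiom: replace a plain directory with
a symlink into /vol so Hypertable state survives the instance's
ephemeral storage. An equivalent hypothetical helper, not part of the
original script:

    link_to_vol() {
      local name=$1
      if [ ! -h "$name" ]; then          # skip if already a symlink
        rm -rf "$name"
        mkdir -p "/vol/hypertable/$name"
        ln -s "/vol/hypertable/$name"    # creates ./$name -> /vol/...
      fi
    }
    link_to_vol run; link_to_vol log; link_to_vol hyperspace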

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/start-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/start-mesos b/ec2/deploy.centos64/root/mesos-ec2/start-mesos
deleted file mode 100755
index cc309cc..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/start-mesos
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-ACTIVE_MASTER=`cat masters | head -1`
-SLAVES=`cat slaves`
-ZOOS=`cat zoo`
-
-
-if [[ $ZOOS = *NONE* ]]; then
-  NUM_ZOOS=0
-else
-  NUM_ZOOS=`cat zoo | wc -l`
-fi
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-cluster_url=`cat cluster-url`
-
-echo "Running with cluster URL: "$cluster_url
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  masterid=1
-  for master in $MASTERS; do
-    echo "Starting master $masterid on $master"
-    ssh $SSH_OPTS $master "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 -u $cluster_url $@ </dev/null >/dev/null" & sleep 0.3
-    masterid=$(($masterid+1))
-  done
-  wait
-else
-  echo "Starting master on $ACTIVE_MASTER"
-  ssh $SSH_OPTS $ACTIVE_MASTER "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 $@ </dev/null >/dev/null"
-fi
-
-sleep 5
-
-for slave in $SLAVES; do
-  echo "Starting slave on $slave"
-  ssh $SSH_OPTS $slave "/root/mesos-ec2/mesos-daemon mesos-slave -u ${cluster_url} </dev/null >/dev/null" &
-  sleep 0.3
-done
-wait
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  echo "ZooKeeper is running at"
-  for zoo in $ZOOS; do
-    echo "      $zoo:2181"
-  done
-fi
-
-echo "Everything's started! You can view the master Web UI at"
-for master in $MASTERS; do
-  echo "      http://$master:8080"
-done
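
Once the script prints the web UI addresses, a quick liveness probe of
the active master is a one-liner (curl assumed available; the first host
in the masters file is the active master, as above):

    ACTIVE_MASTER=`head -1 /root/mesos-ec2/masters`
    curl -sf "http://$ACTIVE_MASTER:8080" >/dev/null && echo "master UI up"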

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable b/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable
deleted file mode 100755
index 7280dc1..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/stop-hypertable
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-cd /root/mesos-ec2/hypertable
-cap stop

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/stop-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/stop-mesos b/ec2/deploy.centos64/root/mesos-ec2/stop-mesos
deleted file mode 100755
index 9fdb875..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/stop-mesos
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-SLAVES=`cat slaves`
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-for slave in $SLAVES; do
-  echo "Stopping slave on $slave"
-  ssh $SSH_OPTS $slave pkill mesos-slave &
-  sleep 0.1
-done
-wait
-
-for master in $MASTERS; do
-  echo "Stopping master on $master"
-  ssh $SSH_OPTS $master pkill mesos-master &
-  sleep 0.1
-done
-wait

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/mesos-ec2/zoo
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/mesos-ec2/zoo b/ec2/deploy.centos64/root/mesos-ec2/zoo
deleted file mode 100644
index efc961b..0000000
--- a/ec2/deploy.centos64/root/mesos-ec2/zoo
+++ /dev/null
@@ -1 +0,0 @@
-{{zoo_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml b/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml
deleted file mode 100644
index b23aef2..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/core-site.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <property>
-    <name>hadoop.tmp.dir</name>
-    <value>/vol/persistent-hdfs</value>
-  </property>
-
-  <property>
-    <name>fs.default.name</name>
-    <value>hdfs://{{active_master}}:9010</value>
-  </property>
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>65536</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh b/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh
deleted file mode 100644
index 854c61f..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/hadoop-env.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=/usr/java/default
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HADOOP_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=1000
-
-# Extra Java runtime options.  Empty by default.
-# export HADOOP_OPTS=-server
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
-
-# Extra ssh options.  Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-export HADOOP_LOG_DIR=/mnt/persistent-hdfs/logs
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=/var/hadoop/persistent-hdfs/pids
-
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HADOOP_NICENESS=10
-
-ulimit -n 16000

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml b/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml
deleted file mode 100644
index 86c9f78..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/hdfs-site.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>2</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-  </property>
-
-  <property>
-    <name>dfs.secondary.http.address</name>
-    <value>0.0.0.0:60090</value>
-    <description>
-      The secondary namenode http server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:60010</value>
-    <description>
-      The address where the datanode server will listen to.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:60075</value>
-    <description>
-      The datanode http server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:60020</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  
-  <property>
-    <name>dfs.http.address</name>
-    <value>0.0.0.0:60070</value>
-    <description>
-      The address and the base port where the dfs namenode web ui will listen on.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>25</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.handler.count</name>
-    <value>8</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml b/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml
deleted file mode 100644
index b1637dc..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/mapred-site.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>{{active_master}}:9001</value>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>4</value>
-    <description>The maximum number of map tasks that will be run
-    simultaneously by a task tracker.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>2</value>
-    <description>The maximum number of reduce tasks that will be run
-    simultaneously by a task tracker.
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/masters
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/masters b/ec2/deploy.centos64/root/persistent-hdfs/conf/masters
deleted file mode 100644
index d26a194..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/masters
+++ /dev/null
@@ -1 +0,0 @@
-{{active_master}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves
----------------------------------------------------------------------
diff --git a/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves b/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves
deleted file mode 100644
index 05f969e..0000000
--- a/ec2/deploy.centos64/root/persistent-hdfs/conf/slaves
+++ /dev/null
@@ -1 +0,0 @@
-{{slave_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.generic/root/mesos-ec2/ec2-variables.sh
----------------------------------------------------------------------
diff --git a/ec2/deploy.generic/root/mesos-ec2/ec2-variables.sh b/ec2/deploy.generic/root/mesos-ec2/ec2-variables.sh
deleted file mode 100644
index 1f76e61..0000000
--- a/ec2/deploy.generic/root/mesos-ec2/ec2-variables.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# These variables are automatically filled in by the mesos-ec2 script.
-export MESOS_MASTERS="{{master_list}}"
-export MESOS_SLAVES="{{slave_list}}"
-export MESOS_ZOO_LIST="{{zoo_list}}"
-export MESOS_HDFS_DATA_DIRS="{{hdfs_data_dirs}}"
-export MESOS_MAPRED_LOCAL_DIRS="{{mapred_local_dirs}}"
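
These exports are the rendered form of the {{...}} placeholders the
launcher substitutes. A hypothetical consumer that sources the file and
walks the slave list:

    # Assumes the launcher has already substituted the placeholders.
    . /root/mesos-ec2/ec2-variables.sh
    for slave in $MESOS_SLAVES; do
      echo "slave: $slave"
    done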

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/core-site.xml b/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/core-site.xml
deleted file mode 100644
index 0fc1402..0000000
--- a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/core-site.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <property>
-    <name>hadoop.tmp.dir</name>
-    <value>/mnt/hdfs</value>
-  </property>
-
-  <property>
-    <name>fs.default.name</name>
-    <value>hdfs://{{active_master}}:9000</value>
-  </property>
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>65536</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hadoop-env.sh b/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hadoop-env.sh
deleted file mode 100644
index bfd431e..0000000
--- a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hadoop-env.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=/usr/lib/jvm/java-6-openjdk
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HADOOP_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=1500
-
-# Extra Java runtime options.  Empty by default.
-# export HADOOP_OPTS=-server
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
-
-# Extra ssh options.  Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-export HADOOP_LOG_DIR=/mnt/hdfs-logs
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
-
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HADOOP_NICENESS=10
-
-ulimit -n 16000

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hdfs-site.xml b/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hdfs-site.xml
deleted file mode 100644
index 46318c7..0000000
--- a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/hdfs-site.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>{{hdfs_data_dirs}}</value>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>25</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.handler.count</name>
-    <value>8</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/masters
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/masters b/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/masters
deleted file mode 100644
index d26a194..0000000
--- a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/masters
+++ /dev/null
@@ -1 +0,0 @@
-{{active_master}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/slaves
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/slaves b/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/slaves
deleted file mode 100644
index 05f969e..0000000
--- a/ec2/deploy.lucid64/root/hadoop-0.20.2/conf/slaves
+++ /dev/null
@@ -1 +0,0 @@
-{{slave_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/cluster-url
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/cluster-url b/ec2/deploy.lucid64/root/mesos-ec2/cluster-url
deleted file mode 100644
index fcf8b41..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/cluster-url
+++ /dev/null
@@ -1 +0,0 @@
-{{cluster_url}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/copy-dir
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/copy-dir b/ec2/deploy.lucid64/root/mesos-ec2/copy-dir
deleted file mode 100755
index 02b6e64..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/copy-dir
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-if [[ "$#" != "1" ]] ; then
-  echo "Usage: copy-dir <dir>"
-  exit 1
-fi
-
-DIR=`readlink -f "$1"`
-DIR=`echo "$DIR"|sed 's@/$@@'`
-DEST=`dirname "$DIR"`
-
-SLAVES=`cat /root/mesos-ec2/slaves`
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-echo "RSYNC'ing $DIR to slaves..."
-for slave in $SLAVES; do
-    echo $slave
-    rsync -e "ssh $SSH_OPTS" -az "$DIR" "$slave:$DEST" & sleep 0.5
-done
-wait
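
Per the usage check at the top, copy-dir mirrors a single directory to
the same path on every slave, e.g. (path illustrative, borrowed from the
setup script's Spark config step):

    /root/mesos-ec2/copy-dir /root/spark/conf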

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/core-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/core-site.xml b/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/core-site.xml
deleted file mode 100644
index 818ed10..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/core-site.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <property>
-    <name>hadoop.tmp.dir</name>
-    <value>/mnt/hadoop-framework</value>
-  </property>
-
-  <property>
-    <name>fs.default.name</name>
-    <value>hdfs://{{active_master}}:9000</value>
-  </property>
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>65536</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh b/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh
deleted file mode 100644
index 811f403..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/hadoop-env.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=/usr/lib/jvm/java-6-openjdk
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HADOOP_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=1500
-
-# Extra Java runtime options.  Empty by default.
-# export HADOOP_OPTS=-server
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
-
-# Extra ssh options.  Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-export HADOOP_LOG_DIR=/mnt/hadoop-logs
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
-
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HADOOP_NICENESS=10
-
-ulimit -n 10000

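Since hadoop-env.sh raises the open-file limit with ulimit -n 10000 as the
daemons source it, the limit a daemon actually inherited can be checked via
its /proc entry (the PID is hypothetical):

    # Inspect the effective open-file limit of a running DataNode.
    grep 'Max open files' /proc/12345/limits
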
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml b/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml
deleted file mode 100644
index 8d6240c..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/hadoop-framework-conf/mapred-site.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>{{active_master}}:9001</value>
-  </property>
-
-  <property>
-    <name>mapred.local.dir</name>
-    <value>{{mapred_local_dirs}}</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value>org.apache.hadoop.mapred.MesosScheduler</value>
-  </property>
-
-  <property>
-    <name>mapred.mesos.master</name>
-    <value>{{cluster_url}}</value>
-  </property>
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>65536</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>20</value>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value>-Xmx1512m</value>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>-1</value>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>15</value>
-  </property>
-
-  <property>
-    <name>mapred.mesos.localitywait</name>
-    <value>5000</value>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>6</value>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>6</value>
-  </property>
-
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>The total amount of buffer memory to use while sorting
-    files, in megabytes.  By default, gives each merge stream 1MB, which
-    should minimize seeks.</description>
-  </property>
-
-</configuration>

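Hand-edited site files like this one are an easy place to introduce malformed
XML or duplicate property names; a quick sanity check with xmllint (a sketch,
assuming the libxml2 tools are installed):

    # Fail fast on malformed XML before pushing the file to the cluster.
    xmllint --noout mapred-site.xml && echo "mapred-site.xml is well-formed"
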
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/haproxy+apache/haproxy.config.template
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/haproxy+apache/haproxy.config.template b/ec2/deploy.lucid64/root/mesos-ec2/haproxy+apache/haproxy.config.template
deleted file mode 100644
index 957c3f6..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/haproxy+apache/haproxy.config.template
+++ /dev/null
@@ -1,8 +0,0 @@
-listen webfarm {{active_master}}:80
-       timeout server 7500
-       timeout client 7500
-       timeout connect 7500
-       mode http
-       balance roundrobin
-       option httpchk HEAD /index.html HTTP/1.0
-       stats uri /stats

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/masters
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/masters b/ec2/deploy.lucid64/root/mesos-ec2/masters
deleted file mode 100644
index c531652..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/masters
+++ /dev/null
@@ -1 +0,0 @@
-{{master_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/mesos-daemon
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/mesos-daemon b/ec2/deploy.lucid64/root/mesos-ec2/mesos-daemon
deleted file mode 100755
index 177265e..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/mesos-daemon
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Set up MESOS_HOME in order to find projd
-export MESOS_HOME=/root/mesos
-
-# Set MESOS_PUBLIC_DNS so slaves can be linked in master web UI
-export MESOS_PUBLIC_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/public-hostname`
-
-# Set PATH to include Scala
-export PATH=$PATH:/root/scala-2.8.0.final/bin
-
-# Set HADOOP_HOME variable to allow slaves to get executors from HDFS
-export HADOOP_HOME=/root/hadoop-0.20.2
-
-ulimit -n 8192
-
-PROGRAM=$1
-shift
-
-EXTRA_OPTS=""
-if [ "$PROGRAM" == "mesos-slave" ]; then
-  # Compute CPU resources (if not specified).
-  if [[ "$*" != *--cpus* ]]; then
-    CPUS=`grep processor /proc/cpuinfo | wc -l`
-    EXTRA_OPTS="$EXTRA_OPTS --cpus=$CPUS"
-  fi
-
-  # Compute memory resources (if not specified).
-  if [[ "$*" != *--mem* ]]; then
-    MEM_KB=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
-    MEM=$(( (MEM_KB - 1024 * 1024) / 1024 ))
-    EXTRA_OPTS="$EXTRA_OPTS --mem=$MEM"
-  fi
-fi
-
-cd $MESOS_HOME/bin
-nohup ./$PROGRAM --log_dir=/mnt/mesos-logs --work_dir=/mnt/mesos-work $EXTRA_OPTS "$@" </dev/null >/mnt/mesos-logs/$PROGRAM.out 2>&1 &

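The default slave resources above are derived from the machine itself: one CPU
per processor entry in /proc/cpuinfo, and total RAM minus a 1 GB reserve for
the OS, converted to MB. The same computation in isolation:

    # Count logical CPUs.
    CPUS=$(grep -c ^processor /proc/cpuinfo)
    # Total memory in KB, minus a 1 GB reserve, expressed in MB.
    MEM_KB=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo)
    MEM=$(( (MEM_KB - 1024 * 1024) / 1024 ))
    echo "--cpus=$CPUS --mem=$MEM"
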
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/redeploy-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/redeploy-mesos b/ec2/deploy.lucid64/root/mesos-ec2/redeploy-mesos
deleted file mode 100755
index 941d783..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/redeploy-mesos
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-NUM_MASTERS=`cat masters | wc -l`
-SLAVES=`cat slaves`
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-if [[ $NUM_MASTERS -gt 1 ]]; then
-  echo "RSYNC'ing /root/mesos to masters..."
-  for master in $MASTERS; do
-    echo $master
-    rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $master:/root & sleep 0.3
-  done
-  wait
-fi
-
-echo "RSYNC'ing /root/mesos to slaves..."
-for slave in $SLAVES; do
-  echo $slave
-  rsync -e "ssh $SSH_OPTS" -az --exclude '*.d' --exclude '*.o' --exclude '*.cpp' --exclude '*.hpp' --exclude '*.pyc' --exclude 'mesos/frameworks/hadoop-0.20.0/logs/*' --exclude 'mesos/work' --exclude 'mesos/logs' --exclude 'mesos/test_output' /root/mesos $slave:/root & sleep 0.3
-done
-wait

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/setup
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/setup b/ec2/deploy.lucid64/root/mesos-ec2/setup
deleted file mode 100755
index cd11d65..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/setup
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/bin/bash
-
-# Make sure we are in the mesos-ec2 directory
-cd /root/mesos-ec2
-
-# Set hostname based on EC2 private DNS name, so that it is set correctly
-# even if the instance is restarted with a different private DNS name
-PRIVATE_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/local-hostname`
-hostname $PRIVATE_DNS
-echo $PRIVATE_DNS > /etc/hostname
-export HOSTNAME=$PRIVATE_DNS  # Fix the bash built-in hostname variable too
-
-echo "Setting up Mesos master on `hostname`..."
-
-# Read command-line arguments
-OS_NAME=$1
-DOWNLOAD_METHOD=$2
-BRANCH=$3
-
-MASTERS_FILE="masters"
-MASTERS=`cat $MASTERS_FILE`
-NUM_MASTERS=`cat $MASTERS_FILE | wc -l`
-OTHER_MASTERS=`cat $MASTERS_FILE | sed '1d'`
-SLAVES=`cat slaves`
-ZOOS=`cat zoo`
-
-if [[ $ZOOS = *NONE* ]]; then
-  NUM_ZOOS=0
-  ZOOS=""
-else
-  NUM_ZOOS=`cat zoo | wc -l`
-fi
-
-# Scripts that get used for/while running Mesos.
-SCRIPTS="copy-dir
-         mesos-daemon
-         redeploy-mesos
-         setup-slave
-         ssh-no-keychecking
-         start-mesos
-         stop-mesos"
-
-HADOOP_HOME=/root/hadoop-0.20.2
-
-#TODO(*): update config scripts to have conditionals for handling different
-#         platforms
-JAVA_HOME=/usr/lib/jvm/java-6-openjdk
-#JAVA_HOME=/usr/lib/jvm/java-6-sun #works for karmic, this is lucid
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-if [[ `tty` == "not a tty" ]] ; then
-    echo "Expecting a tty or pty! (use the ssh -t option)."
-    exit 1
-fi
-
-echo "Setting executable permissions on scripts..."
-for s in $SCRIPTS; do chmod u+x $s; done
-
-echo "Running setup-slave on master to mount filesystems, etc..."
-./setup-slave
-
-echo "SSH'ing to master machine(s) to approve key(s)..."
-for master in $MASTERS; do
-  echo $master
-  ssh $SSH_OPTS $master true &
-  sleep 0.3
-done
-ssh $SSH_OPTS localhost true &
-ssh $SSH_OPTS `hostname` true &
-wait
-
-if [[ $NUM_ZOOS != 0 ]] ; then
-  echo "SSH'ing to ZooKeeper server(s) to approve keys..."
-  zid=1
-  for zoo in $ZOOS; do
-    echo $zoo
-    ssh $SSH_OPTS $zoo true \; mkdir -p /tmp/zookeeper \; echo $zid \> /tmp/zookeeper/myid &
-    zid=$(($zid+1))
-    sleep 0.3
-  done
-fi
-
-# Try to SSH to each cluster node to approve their key. Since some nodes may
-# be slow in starting, we retry failed slaves up to 3 times.
-TODO="$SLAVES $ZOOS $OTHER_MASTERS" # List of nodes to try (initially all)
-TRIES="0"                          # Number of times we've tried so far
-echo "SSH'ing to other cluster nodes to approve keys..."
-while [ "e$TODO" != "e" ] && [ $TRIES -lt 4 ] ; do
-  NEW_TODO=
-  for slave in $TODO; do
-    echo $slave
-    ssh $SSH_OPTS $slave true
-    if [ $? != 0 ] ; then
-        NEW_TODO="$NEW_TODO $slave"
-    fi
-  done
-  TRIES=$((TRIES + 1))
-  if [ "e$NEW_TODO" != "e" ] && [ $TRIES -lt 4 ] ; then
-      sleep 15
-      TODO="$NEW_TODO"
-      echo "Re-attempting SSH to cluster nodes to approve keys..."
-  else
-      break;
-  fi
-done
-
-echo "RSYNC'ing /root/mesos-ec2 to other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  rsync -e "ssh $SSH_OPTS" -az /root/mesos-ec2 $node:/root &
-  scp $SSH_OPTS ~/.ssh/id_rsa $node:.ssh &
-  sleep 0.3
-done
-wait
-
-echo "Running slave setup script on other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  ssh -t $SSH_OPTS root@$node "mesos-ec2/setup-slave" & sleep 0.3
-done
-wait
-
-echo "RSYNC'ing HDFS config files to other cluster nodes..."
-for node in $SLAVES $ZOOS $OTHER_MASTERS; do
-  echo $node
-  rsync -e "ssh $SSH_OPTS" -az $HADOOP_HOME/conf $node:$HADOOP_HOME &
-  sleep 0.3
-done
-wait
-
-DOWNLOADED=0
-
-if [[ "$DOWNLOAD_METHOD" == "git" ]] ; then
-  # Change git's SSH command so it does not ask to accept host keys.
-  export GIT_SSH=/root/mesos-ec2/ssh-no-keychecking
-  REPOSITORY=git://github.com/mesos/mesos.git
-  echo "Checking out Mesos from $REPOSITORY"
-  pushd /root > /dev/null 2>&1
-  rm -rf mesos mesos.tgz
-  # Set git SSH command to a script that uses -o StrictHostKeyChecking=no
-  git clone $REPOSITORY mesos
-  pushd mesos > /dev/null 2>&1
-  git checkout -b $BRANCH --track origin/$BRANCH
-  popd > /dev/null 2>&1
-  popd > /dev/null 2>&1
-  DOWNLOADED=1
-fi
-
-# Build Mesos if we downloaded it
-if [[ "$DOWNLOADED" == "1" ]] ; then
-  echo "Building Mesos..."
-  pushd /root/mesos > /dev/null 2>&1
-  ./configure.ubuntu-lucid-64
-  make clean
-  make
-  popd > /dev/null 2>&1
-  if [ -d /root/spark ] ; then
-    echo "Building Spark..."
-    pushd /root/spark > /dev/null 2>&1
-    MESOS_HOME=/root/mesos make all native
-    popd > /dev/null 2>&1
-  fi
-  echo "Building Hadoop framework..."
-  pushd /root/mesos/frameworks/hadoop-0.20.2 > /dev/null 2>&1
-  ant
-  ant examples
-  popd > /dev/null 2>&1
-fi
-
-echo "Setting up Hadoop framework config files..."
-cp hadoop-framework-conf/* /root/mesos/frameworks/hadoop-0.20.2/conf
-
-echo "Setting up haproxy+apache framework config files..."
-cp haproxy+apache/* /root/mesos/frameworks/haproxy+apache
-
-echo "Setting up Spark config files..."
-# TODO: This currently overwrites whatever the user wrote there; on
-# the other hand, we also don't want to leave an old file created by
-# us because it would have the wrong hostname for HDFS etc
-mkdir -p /root/spark/conf
-echo "-Dspark.dfs=hdfs://$HOSTNAME:9000 -Dspark.repl.classdir=/nfs" \
-     > /root/spark/conf/java-opts
-
-echo "Redeploying /root/mesos..."
-./redeploy-mesos
-
-echo "Setting up NFS..."
-if [ ! -e /nfs ] ; then
-  mkdir -p /mnt/nfs
-  rm -fr /nfs
-  ln -s /mnt/nfs /nfs
-fi
-if ! grep -q '^/nfs ' /etc/exports; then
-  echo "/nfs    10.0.0.0/8(ro,async,no_subtree_check)" >> /etc/exports
-fi
-# Unexport and re-export everything in /etc/exports because, if we are
-# restarting a stopped EC2 instance, we might have had an entry for /nfs in
-# /etc/exports before we created /mnt/nfs.
-exportfs -ua
-exportfs -a
-
-echo "Mounting NFS on slaves..."
-for slave in $SLAVES; do
-  echo $slave
-  ssh -t $SSH_OPTS root@$slave "mkdir -p /nfs; mount $HOSTNAME:/nfs /nfs" & sleep 0.3
-done
-wait
-
-echo "Formatting HDFS namenode..."
-$HADOOP_HOME/bin/hadoop namenode -format
-
-echo "Starting HDFS..."
-$HADOOP_HOME/bin/start-dfs.sh
-
-sleep 1
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  echo "Starting ZooKeeper quorum..."
-  for zoo in $ZOOS; do
-    ssh $SSH_OPTS $zoo "/root/mesos/third_party/zookeeper-*/bin/zkServer.sh start </dev/null >/dev/null" & sleep 0.1
-  done
-  wait
-  sleep 5
-fi
-
-echo "Stopping any existing Mesos cluster..."
-./stop-mesos
-sleep 2
-
-echo "Starting Mesos cluster..."
-./start-mesos

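The key-approval loop in the middle of this script is the load-bearing part:
attempt an SSH no-op against every node, keep only the failures, and retry
that shrinking set up to three more times with a 15-second pause, since
freshly launched instances may not accept connections immediately. The same
pattern condensed (the host list is illustrative):

    # Retry SSH against unreachable nodes, up to 4 attempts in total.
    TODO="node1 node2 node3"    # hypothetical node list
    for attempt in 1 2 3 4; do
      FAILED=""
      for node in $TODO; do
        ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "$node" true \
          || FAILED="$FAILED $node"
      done
      [ -z "$FAILED" ] && break
      TODO="$FAILED"
      sleep 15
    done
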
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/setup-slave b/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
deleted file mode 100755
index 5192193..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/setup-slave
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-
-# Make sure we are in the mesos-ec2 directory
-cd /root/mesos-ec2
-
-# Set hostname based on EC2 private DNS name, so that it is set correctly
-# even if the instance is restarted with a different private DNS name
-PRIVATE_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/local-hostname`
-hostname $PRIVATE_DNS
-echo $PRIVATE_DNS > /etc/hostname
-HOSTNAME=$PRIVATE_DNS  # Fix the bash built-in hostname variable too
-
-echo "Setting up Mesos slave on `hostname`..."
-
-# Mount options to use for ext3 and xfs disks (the ephemeral disks
-# are ext3, but we use xfs for EBS volumes to format them faster)
-EXT3_MOUNT_OPTS="defaults,noatime,nodiratime"
-XFS_MOUNT_OPTS="defaults,noatime,nodiratime,allocsize=8m"
-
-# Create Hadoop and HDFS directories in a given parent directory
-# (for example /mnt, /mnt2, and so on)
-function create_hadoop_dirs {
-  location=$1
-  mkdir -p $location/hdfs/dfs $location/hadoop/tmp
-  mkdir -p $location/hadoop/mrlocal $location/hadoop/mrlocal2
-}
-
-# Set up Hadoop and Mesos directories in /mnt
-create_hadoop_dirs /mnt
-mkdir -p /mnt/hdfs-logs
-mkdir -p /mnt/hadoop-logs
-mkdir -p /mnt/mesos-logs
-mkdir -p /mnt/mesos-work
-
-# Mount any ephemeral volumes we might have beyond /mnt
-function setup_extra_volume {
-  device=$1
-  mount_point=$2
-  if [[ -e $device && ! -e $mount_point ]]; then
-    mkdir -p $mount_point
-    mount -o $EXT3_MOUNT_OPTS $device $mount_point
-    echo "$device $mount_point auto $EXT3_MOUNT_OPTS 0 0" >> /etc/fstab
-  fi
-  if [[ -e $mount_point ]]; then
-    create_hadoop_dirs $mount_point
-  fi
-}
-setup_extra_volume /dev/sdc /mnt2
-setup_extra_volume /dev/sdd /mnt3
-setup_extra_volume /dev/sde /mnt4
-
-# Mount cgroup file system
-if [[ ! -e /cgroup ]]; then
-  mkdir -p /cgroup
-  mount -t cgroup none /cgroup
-  echo "none /cgroup cgroup defaults 0 0" >> /etc/fstab
-fi
-
-# Format and mount EBS volume (/dev/sdv) as /vol if the device exists
-# and we have not already created /vol
-if [[ -e /dev/sdv && ! -e /vol ]]; then
-  mkdir /vol
-  if mkfs.xfs -q /dev/sdv; then
-    mount -o $XFS_MOUNT_OPTS /dev/sdv /vol
-    echo "/dev/sdv /vol xfs $XFS_MOUNT_OPTS 0 0" >> /etc/fstab
-  else
-    # mkfs.xfs is not installed on this machine or has failed;
-    # delete /vol so that the user doesn't think we successfully
-    # mounted the EBS volume
-    rmdir /vol
-  fi
-fi
-
-# Remove ~/.ssh/known_hosts because it gets polluted as you start/stop many
-# clusters (new machines tend to come up under old hostnames)
-rm -f /root/.ssh/known_hosts
-
-./approve-master-key

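Note that setup-slave is written to be safely re-runnable: setup_extra_volume
only formats and mounts a device that exists and has no mount point yet, and
the cgroup and EBS blocks are guarded the same way. The guard in isolation
(device and mount point are illustrative):

    # Idempotent mount: act only if the device exists and the mount point is new.
    if [[ -e /dev/sdc && ! -e /mnt2 ]]; then
      mkdir -p /mnt2
      mount -o defaults,noatime,nodiratime /dev/sdc /mnt2
    fi
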
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/setup-torque
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/setup-torque b/ec2/deploy.lucid64/root/mesos-ec2/setup-torque
deleted file mode 100755
index 2ac8fd3..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/setup-torque
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-SLAVES_FILE="slaves"
-SLAVES=`cat $SLAVES_FILE`
-
-SCHEDULER_ITERATION=5
-
-#These seem to be broken, i.e. missing directories after install
-#ssh $MASTERS "apt-get install -y torque-server"
-#ssh $MASTERS "apt-get install -y torque-scheduler"
-#ssh $MASTERS "apt-get install -y torque-client"
-
-#install torque: download/unzip torque
-function installtorque {
-	pushd ~
-	echo "downloading and installing torque on master"
-	#wget http://www.clusterresources.com/downloads/torque/torque-2.4.7.tar.gz
-	rm -rf torque-2.4.7.tar.gz
-	wget http://mesos.berkeley.edu/torque-2.4.7.tar.gz
-	tar xzf torque-2.4.7.tar.gz
-	pushd torque-2.4.7
-	./configure --prefix=/usr
-	make -j8
-	make install
-	popd;popd
-}
-
-function setuptorque {
-	pushd ~/torque-2.4.7
-	echo "running ldconfig on master"
-	ldconfig
-        #./torque.setup root # Note: sets some defaults for batch queue
-	qterm
-        yes|./torque.setup root localhost # Note: sets some defaults for batch queue
-
-	#WARNING: allow root to qsub for debug purposes only, may be dangerous
-	qmgr -c 'set server acl_roots+=root@*' #allow root to submit jobs
-	qmgr -c "set server scheduler_iteration=$SCHEDULER_ITERATION"
-	#qmgr -c 's s allow_node_submit=true' #other hosts can submit too
-
-	NUM_SLAVES=`cat ~/mesos-ec2/slaves|wc -l`
-	#the server must be restarted after this
-	qmgr -c "set queue batch resources_available.nodect=$NUM_SLAVES"
-	#qmgr -c "set server resources_available.nodect=$NUM_SLAVES"
-	qterm
-        pbs_server
-
-	touch ~/.rhosts
-	echo `hostname` |cat >> ~/.rhosts
-	echo `hostname -f` |cat >> ~/.rhosts
-	echo localhost |cat >> ~/.rhosts
-
-	popd
-}
-
-
-function installslaves {
-	pushd ~/torque-2.4.7
-	echo "building packages for slave"
-	make packages
-	#install torque-mom on slave nodes
-	apt-get install -y dsh
-	
-        echo "copying slave install packages to nfs"
-	mkdir -p /nfs/torque
-	cp torque-package-mom-linux-x86_64.sh /nfs/torque/torque-package-mom-linux-x86_64.sh
-	cp torque-package-clients-linux-x86_64.sh /nfs/torque/torque-package-clients-linux-x86_64.sh
-
-	echo "installing torque mom and clients package on slaves"
-	for i in `cat $SLAVES_FILE`; do ssh $i /nfs/torque/torque-package-mom-linux-x86_64.sh --install; ldconfig; done
-	for i in `cat $SLAVES_FILE`; do ssh $i /nfs/torque/torque-package-clients-linux-x86_64.sh --install; ldconfig; done
-
-	echo "Running ldconfig on slaves"
-	dsh -f $SLAVES_FILE ldconfig
-	popd
-}
-
-function installmpi {
-        #setup mpich2 on all of the cluster nodes
-        ./setup-mpi
-
-        #setup prologue script
-        cp ./prologue.setup-mpi-master /var/spool/torque/mom_priv/prologue
-        cp ./epilogue.kill-mpi-ring /var/spool/torque/mom_priv/epilogue
-
-	for i in `cat $SLAVES_FILE`; do scp ./prologue.setup-mpi-master $i:/var/spool/torque/mom_priv/prologue; done
-	for i in `cat $SLAVES_FILE`; do scp ./epilogue.kill-mpi-ring $i:/var/spool/torque/mom_priv/epilogue; done
-}
-
-function installmaui {
-	pushd ~
-	#http://www.clusterresources.com/download/maui/maui-3.3.tar.gz
-	rm -rf mesos-maui-3.3.tar
-	wget http://mesos.berkeley.edu/mesos-maui-3.3.tar
-	tar -xf mesos-maui-3.3.tar
-	pushd maui-3.3
-	./configure
-	make
-	make install
-	/usr/local/maui/sbin/maui
-	popd;popd
-}
-
-installtorque
-setuptorque
-installslaves
-installmpi
-installmaui

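Once pbs_server, the per-node pbs_mom daemons, and Maui are all running, a
short smoke test exercises the whole chain (a hypothetical check, not part of
the original script):

    # Submit a trivial job and watch it appear in the queue.
    echo "sleep 30" | qsub
    qstat
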
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/slaves
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/slaves b/ec2/deploy.lucid64/root/mesos-ec2/slaves
deleted file mode 100644
index 05f969e..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/slaves
+++ /dev/null
@@ -1 +0,0 @@
-{{slave_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/ssh-no-keychecking
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/ssh-no-keychecking b/ec2/deploy.lucid64/root/mesos-ec2/ssh-no-keychecking
deleted file mode 100755
index 3daf46f..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/ssh-no-keychecking
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-# Utility script that exec's SSH without key checking so that we can check
-# out code from GitHub without prompting the user.
-
-exec ssh -o StrictHostKeyChecking=no "$@"

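The setup script exports this wrapper as GIT_SSH so that cloning the Mesos
repository never stops at a host-key prompt; the same trick in isolation:

    # Route git's transport through the prompt-free SSH wrapper for one clone.
    GIT_SSH=/root/mesos-ec2/ssh-no-keychecking \
      git clone git://github.com/mesos/mesos.git /root/mesos
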
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/start-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/start-mesos b/ec2/deploy.lucid64/root/mesos-ec2/start-mesos
deleted file mode 100755
index cc309cc..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/start-mesos
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-ACTIVE_MASTER=`cat masters | head -1`
-SLAVES=`cat slaves`
-ZOOS=`cat zoo`
-
-if [[ $ZOOS = *NONE* ]]; then
-  NUM_ZOOS=0
-else
-  NUM_ZOOS=`cat zoo | wc -l`
-fi
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-cluster_url=`cat cluster-url`
-
-echo "Running with cluster URL: $cluster_url"
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  masterid=1
-  for master in $MASTERS; do
-    echo "Starting master $masterid on $master"
-    ssh $SSH_OPTS $master "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 -u $cluster_url $@ </dev/null >/dev/null" & sleep 0.3
-    masterid=$(($masterid+1))
-  done
-  wait
-else
-  echo "Starting master on $ACTIVE_MASTER"
-  ssh $SSH_OPTS $ACTIVE_MASTER "/root/mesos-ec2/mesos-daemon mesos-master -p 5050 $@ </dev/null >/dev/null"
-fi
-
-sleep 5
-
-for slave in $SLAVES; do
-  echo "Starting slave on $slave"
-  ssh $SSH_OPTS $slave "/root/mesos-ec2/mesos-daemon mesos-slave -u ${cluster_url} </dev/null >/dev/null" &
-  sleep 0.3
-done
-wait
-
-if [[ $NUM_ZOOS != 0 ]]; then
-  echo "ZooKeeper is running at"
-  for zoo in $ZOOS; do
-    echo "      $zoo:2181"
-  done
-fi
-
-echo "Everything's started! You can view the master Web UI at"
-for master in $MASTERS; do
-  echo "      http://$master:8080"
-done

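start-mesos assumes a cluster-url file has already been written next to it,
and points every daemon at that URL with -u. A defensive check before starting
anything (a sketch; the URL's format depends on whether a ZooKeeper quorum is
in use):

    # Refuse to start if the launch scripts never wrote cluster-url.
    [ -s /root/mesos-ec2/cluster-url ] || { echo "cluster-url missing" >&2; exit 1; }
    echo "Cluster URL: $(cat /root/mesos-ec2/cluster-url)"
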
http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/stop-mesos
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/stop-mesos b/ec2/deploy.lucid64/root/mesos-ec2/stop-mesos
deleted file mode 100755
index 9fdb875..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/stop-mesos
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-cd /root/mesos-ec2
-
-MASTERS=`cat masters`
-SLAVES=`cat slaves`
-
-SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=5"
-
-for slave in $SLAVES; do
-  echo "Stopping slave on $slave"
-  ssh $SSH_OPTS $slave pkill mesos-slave &
-  sleep 0.1
-done
-wait
-
-for master in $MASTERS; do
-  echo "Stopping master on $master"
-  ssh $SSH_OPTS $master pkill mesos-master &
-  sleep 0.1
-done
-wait

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/deploy.lucid64/root/mesos-ec2/zoo
----------------------------------------------------------------------
diff --git a/ec2/deploy.lucid64/root/mesos-ec2/zoo b/ec2/deploy.lucid64/root/mesos-ec2/zoo
deleted file mode 100644
index efc961b..0000000
--- a/ec2/deploy.lucid64/root/mesos-ec2/zoo
+++ /dev/null
@@ -1 +0,0 @@
-{{zoo_list}}

http://git-wip-us.apache.org/repos/asf/mesos/blob/8ca2934e/ec2/mesos-ec2
----------------------------------------------------------------------
diff --git a/ec2/mesos-ec2 b/ec2/mesos-ec2
deleted file mode 100755
index e3c0c14..0000000
--- a/ec2/mesos-ec2
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#     http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-cd "`dirname $0`"
-python ./mesos_ec2.py "$@"