You are viewing a plain text version of this content. The canonical link for it is here.
Posted to olio-commits@incubator.apache.org by ws...@apache.org on 2008/10/20 17:39:20 UTC

svn commit: r706345 [1/5] - in /incubator/olio/workload/rails: ./ trunk/ trunk/bin/ trunk/config/ trunk/config/security/ trunk/deploy/ trunk/lib/ trunk/mysql-connector-java-5.0.6/ trunk/mysql-connector-java-5.0.6/debug/ trunk/mysql-connector-java-5.0.6...

Author: wsobel
Date: Mon Oct 20 10:39:16 2008
New Revision: 706345

URL: http://svn.apache.org/viewvc?rev=706345&view=rev
Log:
Added rails workload generator

Added:
    incubator/olio/workload/rails/
    incubator/olio/workload/rails/trunk/
    incubator/olio/workload/rails/trunk/.gitignore
    incubator/olio/workload/rails/trunk/README
    incubator/olio/workload/rails/trunk/Web2.0Driver.iml
    incubator/olio/workload/rails/trunk/bin/
    incubator/olio/workload/rails/trunk/bin/dbloader.sh   (with props)
    incubator/olio/workload/rails/trunk/bin/drop.sql   (with props)
    incubator/olio/workload/rails/trunk/bin/fileloader.rb   (with props)
    incubator/olio/workload/rails/trunk/bin/fileloader.sh   (with props)
    incubator/olio/workload/rails/trunk/bin/loader.pl   (with props)
    incubator/olio/workload/rails/trunk/bin/schema.sql
    incubator/olio/workload/rails/trunk/build.properties.template
    incubator/olio/workload/rails/trunk/build.xml
    incubator/olio/workload/rails/trunk/config/
    incubator/olio/workload/rails/trunk/config/.run.properties.swp   (with props)
    incubator/olio/workload/rails/trunk/config/.runconfig.xml.swp   (with props)
    incubator/olio/workload/rails/trunk/config/logging.properties
    incubator/olio/workload/rails/trunk/config/run.xml
    incubator/olio/workload/rails/trunk/config/security/
    incubator/olio/workload/rails/trunk/config/security/driver.policy
    incubator/olio/workload/rails/trunk/deploy/
    incubator/olio/workload/rails/trunk/deploy/benchmark.xml
    incubator/olio/workload/rails/trunk/deploy/config.xhtml
    incubator/olio/workload/rails/trunk/deploy/run.xml
    incubator/olio/workload/rails/trunk/deploy/run2.xml
    incubator/olio/workload/rails/trunk/lib/
    incubator/olio/workload/rails/trunk/lib/java_memcached-release_1.5.1.jar   (with props)
    incubator/olio/workload/rails/trunk/lib/mysql-connector-java-5.0.6-bin.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/debug/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/debug/mysql-connector-java-5.0.6-bin-g.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/ant-contrib.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/aspectjrt.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/aspectjtools.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/c3p0-0.9.1-pre6.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/commons-logging.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/jboss-common-jdbc-wrapper.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/jdbc2_0-stdext.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/jta-spec1_0_1.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/junit.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/lib/log4j-1.2.9.jar   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/testsuite/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/testsuite/simple/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/testsuite/simple/tb2-data.txt.gz   (with props)
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/testsuite/ssl-test-certs/
    incubator/olio/workload/rails/trunk/mysql-connector-java-5.0.6/src/testsuite/ssl-test-certs/test-cert-store   (with props)
    incubator/olio/workload/rails/trunk/nbproject/
    incubator/olio/workload/rails/trunk/nbproject/.cvsignore
    incubator/olio/workload/rails/trunk/nbproject/project.xml
    incubator/olio/workload/rails/trunk/resources/
    incubator/olio/workload/rails/trunk/resources/event.jpg   (with props)
    incubator/olio/workload/rails/trunk/resources/event.pdf
    incubator/olio/workload/rails/trunk/resources/event_thumb.jpg   (with props)
    incubator/olio/workload/rails/trunk/resources/person.jpg   (with props)
    incubator/olio/workload/rails/trunk/resources/person_thumb.jpg   (with props)
    incubator/olio/workload/rails/trunk/sbin/
    incubator/olio/workload/rails/trunk/sbin/agent.sh   (with props)
    incubator/olio/workload/rails/trunk/sbin/master.sh   (with props)
    incubator/olio/workload/rails/trunk/sbin/multi.sh   (with props)
    incubator/olio/workload/rails/trunk/sbin/registry.sh   (with props)
    incubator/olio/workload/rails/trunk/sbin/setenv.sh   (with props)
    incubator/olio/workload/rails/trunk/src/
    incubator/olio/workload/rails/trunk/src/com/
    incubator/olio/workload/rails/trunk/src/com/sun/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/driver/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/driver/UIDriver.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/driver/Web20Driver.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/fsloader/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/fsloader/FileLoader.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/harness/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/harness/Web20Benchmark.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Address.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Attendees.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Comments.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Documents.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/EventTag.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Friends.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Images.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/LoadController.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Person.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/SocialEvent.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/Tag.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/framework/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/framework/Loadable.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/framework/Loader.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/framework/ThreadConnection.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/loader/framework/ThreadResource.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/GrowthTest.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/MemCacheUtility.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/RandomUtil.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/ScaleFactors.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/Scramble.java
    incubator/olio/workload/rails/trunk/src/com/sun/web20/util/UserName.java
    incubator/olio/workload/rails/trunk/tmp/
    incubator/olio/workload/rails/trunk/tmp/MemCacheUtility.java
    incubator/olio/workload/rails/trunk/tmp/Web20Benchmark.java
    incubator/olio/workload/rails/trunk/tmp/config.xhtml
    incubator/olio/workload/rails/trunk/tmp/run.xml
    incubator/olio/workload/rails/trunk/tmp/truncate_errorlog.sh

Added: incubator/olio/workload/rails/trunk/.gitignore
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/.gitignore?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/.gitignore (added)
+++ incubator/olio/workload/rails/trunk/.gitignore Mon Oct 20 10:39:16 2008
@@ -0,0 +1,2 @@
+build/*
+build.properties
\ No newline at end of file

Added: incubator/olio/workload/rails/trunk/README
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/README?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/README (added)
+++ incubator/olio/workload/rails/trunk/README Mon Oct 20 10:39:16 2008
@@ -0,0 +1,13 @@
+                    Web2.0 Sample README
+
+The Web2.0 sample provides a very simple example for organizing a Faban
+benchmark project as well as how to write a new benchmark. Many components
+are generic to most Faban projects. It is a good starting point to copy the
+directory from this sample to create your new benchmark. Many components
+will work with no or little modifications.
+
+Notes:
+- The sbin directory contains utilities to run a benchmark outside
+  the Faban harness. This will not be packaged into the deployment
+  jar files. Benchmark-specific scripts and binaries should be placed
+  in the bin directory.
\ No newline at end of file

Added: incubator/olio/workload/rails/trunk/Web2.0Driver.iml
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/Web2.0Driver.iml?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/Web2.0Driver.iml (added)
+++ incubator/olio/workload/rails/trunk/Web2.0Driver.iml Mon Oct 20 10:39:16 2008
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module version="4" relativePaths="true" type="JAVA_MODULE">
+  <component name="ModuleRootManager" />
+  <component name="NewModuleRootManager">
+    <output url="file://$MODULE_DIR$/build/classes" />
+    <exclude-output />
+    <exclude-exploded />
+    <content url="file://$MODULE_DIR$">
+      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
+    </content>
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/lib/java_memcached-release_1.5.1.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="library" name="Faban" level="application" />
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/lib/mysql-connector-java-5.0.6-bin.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntryProperties />
+  </component>
+</module>
+

Added: incubator/olio/workload/rails/trunk/bin/dbloader.sh
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/dbloader.sh?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/dbloader.sh (added)
+++ incubator/olio/workload/rails/trunk/bin/dbloader.sh Mon Oct 20 10:39:16 2008
@@ -0,0 +1,43 @@
+#!/bin/sh
+#Script to run loader by hand
+
+#Edit above lines if required
+if [ -z "$2" ] ; then
+    echo "Usage: $0 [dbserver] [concurrent users]" >&2
+    exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+    echo "Please set JAVA_HOME and restart command" >&2
+    exit 1
+fi
+
+SCALE=$2
+DB_HOST=$1
+
+BINDIR=`dirname $0`
+
+# This script is in $FABAN_HOME/benchmarks/Web20Driver/bin
+# we need to go up 4 levels to get to $FABAN_HOME.
+if [ -n "$BINDIR" ]
+then
+    FABAN_HOME=`cd $BINDIR/../../.. > /dev/null 2>&1 && pwd`
+    BENCH_HOME=`cd $BINDIR/.. > /dev/null 2>&1 &&pwd`
+    export FABAN_HOME BENCH_HOME
+fi
+
+B=$BENCH_HOME/lib
+L=$FABAN_HOME/lib
+CLASSPATH=$B/mysql-connector-java-5.0.6-bin.jar:$B/Web20Driver.jar:\
+$L/commons-httpclient-2.0.1.jar:$L/fabancommon.jar:$L/commons-logging.jar:\
+$L/fabandriver.jar:$L/fabanagents.jar
+export CLASSPATH
+
+$JAVA_HOME/bin/java -server com.sun.web20.loader.LoadController com.mysql.jdbc.Driver \
+ "jdbc:mysql://$DB_HOST/web20ror?user=web20&password=web20&relaxAutoCommit=true&sessionVariables=FOREIGN_KEY_CHECKS=0" $SCALE
+EXIT_CODE=$?  
+if [ "$EXIT_CODE" = 0 ] ; then
+    echo "Database Load Successful"
+else
+    echo "ERROR: Database loader exited with code ${EXIT_CODE}."
+fi

Propchange: incubator/olio/workload/rails/trunk/bin/dbloader.sh
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/olio/workload/rails/trunk/bin/drop.sql
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/drop.sql?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/drop.sql (added)
+++ incubator/olio/workload/rails/trunk/bin/drop.sql Mon Oct 20 10:39:16 2008
@@ -0,0 +1,12 @@
+drop table users;
+drop table tags;
+drop table taggings;
+drop table schema_migrations;
+drop table invites;
+drop table images;
+drop table geolocations;
+drop table events_users;
+drop table events;
+drop table documents;
+drop table comments;
+drop table addresses;

Propchange: incubator/olio/workload/rails/trunk/bin/drop.sql
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/olio/workload/rails/trunk/bin/fileloader.rb
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/fileloader.rb?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/fileloader.rb (added)
+++ incubator/olio/workload/rails/trunk/bin/fileloader.rb Mon Oct 20 10:39:16 2008
@@ -0,0 +1,47 @@
+#!/opt/coolstack/bin/ruby
+ if ARGV.size != 1 then
+  puts "Usage: fileloader.rb <scale> \nPlease setup the $FABAN_HOME environment variable before running the command.\n"
+  exit 1
+ end
+
+e = 2.7182818
+scale = ARGV[0].to_i
+
+num_users = scale * 4
+e_power = num_users/-10000.0
+chl_prob = (1.0 - e**e_power) / (1.0 + e**e_power)
+num_events = (15000 * chl_prob + 0.5).ceil
+num_events = num_events.ceil
+
+sets_to_load_into_mogile =
+ {
+ "Users" => num_users,
+ "Events" => num_events
+ }
+
+sets_to_load_into_mogile.keys.each { |set|
+ #puts "key is #{set} and value is #{sets_to_load_into_mogile[set]}"
+if set == "Users"  then 
+	puts "---1 Copying for Persons. . ."
+	count = 1
+	puts "range is #{count} to #{num_users}"  
+	num_users.times do
+		puts "/copying for person #{count}\n"
+		exec "/usr/bin/cp $FABAN_HOME/benchmarks/Web20Driver/resources/person.jpg p#{count}.jpg;
+			/usr/bin/cp $FABAN_HOME/benchmarks/Web20Driver/resources/person_thumb.jpg p#{count}t.jpg" if fork.nil?
+		count = count + 1
+	end
+	
+else 
+	puts "---2 Copying for Events. . ."
+	count = 1
+	puts "range is #{count} to #{num_events}"  
+	num_events.times do
+		puts "/copying for event #{count}\n"
+		exec "/usr/bin/cp $FABAN_HOME/benchmarks/Web20Driver/resources/event.jpg e#{count}.jpg;
+			/usr/bin/cp $FABAN_HOME/benchmarks/Web20Driver/resources/event_thumb.jpg e#{count}t.jpg;
+			/usr/bin/cp $FABAN_HOME/benchmarks/Web20Driver/resources/event.pdf e#{count}1.pdf" if fork.nil?
+		count = count + 1
+	end
+end 
+}

Propchange: incubator/olio/workload/rails/trunk/bin/fileloader.rb
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/olio/workload/rails/trunk/bin/fileloader.sh
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/fileloader.sh?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/fileloader.sh (added)
+++ incubator/olio/workload/rails/trunk/bin/fileloader.sh Mon Oct 20 10:39:16 2008
@@ -0,0 +1,41 @@
+#!/bin/sh
+#Script to run loader by hand
+
+if [ -z "$1" ] ; then
+    echo "Usage: $0 [concurrent users]" >&2
+    exit 1
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+    echo "Please set JAVA_HOME and restart command" >&2
+    exit 1
+fi
+
+SCALE=$1
+
+BINDIR=`dirname $0`
+
+# This script is in $FABAN_HOME/benchmarks/Web20Driver/bin
+# we need to go up 4 levels to get to $FABAN_HOME.
+if [ -n "$BINDIR" ]
+then
+    #FABAN_HOME=`cd $BINDIR/../../.. > /dev/null 2>&1 && pwd`
+    BENCH_HOME=`cd $BINDIR/.. > /dev/null 2>&1 &&pwd`
+    export FABAN_HOME BENCH_HOME
+fi
+
+B=$BENCH_HOME/lib
+L=$FABAN_HOME/lib
+CLASSPATH=$B/Web20Driver.jar:$L/commons-httpclient-2.0.1.jar:\
+$L/fabancommon.jar:$L/commons-logging.jar:$L/fabandriver.jar:$L/fabanagents.jar
+export CLASSPATH
+
+$JAVA_HOME/bin/java -server com.sun.web20.fsloader.FileLoader \
+    $BENCH_HOME/resources $SCALE
+
+EXIT_CODE=$?
+if [ "$EXIT_CODE" = 0 ] ; then
+    echo "File Load Successful"
+else
+    echo "ERROR: File loader exited with code ${EXIT_CODE}."
+fi

Propchange: incubator/olio/workload/rails/trunk/bin/fileloader.sh
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/olio/workload/rails/trunk/bin/loader.pl
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/loader.pl?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/loader.pl (added)
+++ incubator/olio/workload/rails/trunk/bin/loader.pl Mon Oct 20 10:39:16 2008
@@ -0,0 +1,997 @@
+#!/opt/coolstack/bin/perl
+
+use Getopt::Std;
+use POSIX;
+use DBI;
+use File::Copy;
+use MogileFS::Client;
+use Getopt::Long;
+use Pod::Usage qw{ pod2usage };
+use Digest::MD5 qw{ md5_hex };
+use Time::HiRes qw{ gettimeofday tv_interval };
+use LWP::Simple;
+use POSIX qw(:sys_wait_h);
+use Compress::Zlib;
+$| = 1;
+use constant ERR_FATAL => 1;
+
+#----validate cmd line args----#
+%options=();
+# d is the domain name argument, usually sfbay.sun.com
+# s is the scale(active users) argument
+getopts("d:s:",\%options);
+print "Got -d $options{d}\n" if defined $options{d} or die "-d <domainname> is required";
+print "Got -s $options{s}\n" if defined $options{s} or die "-s <scale, or number of active users> is required";
+
+#----PARAMETERS----#
+ my $user = "mogile";
+ my $password = "some_pass";
+ my $db_host = "";
+ my $number_of_active_users = $options{s};
+ my $number_of_users = $number_of_active_users * 4;
+ my $e_power = $number_of_users / -10000.0;
+ my $chl_prob = (1.0 - exp($e_power)) / (1.0 + exp($e_power));
+ # Rounding results by adding .5 and converting to int.
+ my $number_of_events = int(15000 * $chl_prob + .5);
+ #     private static double cumuHalfLogistic(double x, double scale) {
+ #       double power = -x / scale;
+ #       return (1d - Math.exp(power)) / (1d + Math.exp(power));
+ #   }
+ #
+ #           double prob = cumuHalfLogistic(users, 10000);
+ #       // We limit to 5000 tags
+ #       return (int) Math.round(5000 * prob);
+ #my $number_of_events = ceil($number_of_users * 0.07);
+ #$mogile_domain = "sfbay.sun.com";
+ $mogile_domain = $options{d};
+# $mogile_tracker_host = "10.6.141.125:6001";
+ $mogile_tracker_host = "localhost:6001";
+ $mogile_file_class = "Addresses";
+ my $resources_dir = "/export/sw/resources"; #dir. that holds the files to be loaded.
+ my $event_img = "event.jpg";
+ my $event_img_thmb = "event_thumb.jpg";
+ my $event_literature = "event.pdf";
+ my $person_img = "person.jpg";
+ my $person_img_thmb = "person_thumb.jpg";
+
+#----CONSTANTS----#
+ my $sleep_time_after_load = 10;
+ my $person_prefix = "p";
+ my $event_prefix = "e";
+ my $person_thumbnail_suffix = "t";
+ my $event_thumbnail_suffix = "t";
+ my $event_literature_suffix = "l";
+ my $image_extn = "jpg";
+ my $literature_extn = "pdf";
+#----dont change this section----#
+$opts{help} = 0;
+$opts{trackers}=$mogile_tracker_host;
+$opts{domain}=$mogile_domain;
+$opts{class}=$mogile_file_class;
+#$opts{big}="vvv";
+
+#----Connect to db, obtain initial list of----# 
+#----rows in the file_to_replicate table  ----#
+ my $datasource = 'DBI:mysql:mogilefs:$db_host';
+ my $dbh = DBI->connect($datasource, $user, $password)
+                or die "Couldn't connect to database: " . DBI->errstr;
+ print "Connected to Mogile database as $user \n"; 
+ my $sth = $dbh->prepare('SELECT * FROM file_to_replicate')
+                or die "Couldn't prepare statement: " . $dbh->errstr;
+ $sth->execute()
+ 		or die "Couldn't execute statement: " . $sth->errstr;
+ print "Number of pre existing rows = " . $sth->rows . " \n";
+ my $initial_row_count = $sth->rows;
+
+#----Begin loading files----#
+ my $total = 0;
+
+ print "\n\nInserting person data: Main images\n";
+ print "----------------------------------\n";
+for (my $count = 0; $count < $number_of_users; $count ++) 
+  {
+ $key = gen_key($person_prefix, $count, "", $image_extn);
+ print "Inserting $key into Mogile.\n";
+ load($resources_dir . "/" . $person_img, $key);
+
+ print "Finished inserting $key\n";
+ $total ++;
+ }
+
+ print "\n\nInserting person data: Thumbnail images\n";
+ print "---------------------------------------\n";
+for (my $count = 0; $count < $number_of_users; $count ++) 
+  {
+ $key = gen_key($person_prefix, $count, $person_thumbnail_suffix, $image_extn);
+ print "Inserting $key into Mogile.\n";
+ load($resources_dir . "/" . $person_img_thmb, $key);
+
+ print "Finished inserting $key\n";
+ $total ++;
+ }
+
+ print "\n\nInserting event data: Images\n";
+ print "----------------------------\n";
+for (my $count = 0; $count < $number_of_events; $count ++) 
+  {
+ $key = gen_key($event_prefix, $count, "", $image_extn);
+ print "Inserting $key into Mogile.\n";
+ load($resources_dir . "/" . $event_img, $key);
+
+ print "Finished inserting $key\n";
+ $total ++;
+ }
+
+
+ print "\n\nInserting event data: Thumbnail Images\n";
+ print "----------------------------\n";
+for (my $count = 0; $count < $number_of_events; $count ++)
+  {
+ $key = gen_key($event_prefix, $count, $event_thumbnail_suffix, $image_extn);
+ print "Inserting $key into Mogile.\n";
+ load($resources_dir . "/" . $event_img_thmb, $key);
+
+ print "Finished inserting $key\n";
+ $total ++;
+ }
+
+
+
+ print "\n\nInserting event data: Literature\n";
+ print "--------------------------------\n";
+for (my $count = 0; $count < $number_of_events; $count ++) 
+  {
+ $key = gen_key($event_prefix, $count, $event_literature_suffix, $literature_extn);
+ print "Inserting $key into Mogile.\n";
+ load($resources_dir . "/" . $event_literature, $key);
+
+ print "Finished inserting $key\n";
+ $total ++;
+ }
+
+#----Connect to db, obtain final list of----# 
+#----rows in the file_to_replicate table  ----#
+ my $dbh = DBI->connect('DBI:mysql:mogilefs', $user, $password)
+                or die "Couldn't connect to database: " . DBI->errstr;
+ print "Connected to Mogile database as $user \n"; 
+ my $sth = $dbh->prepare('SELECT * FROM file_to_replicate')
+                or die "Couldn't prepare statement: " . $dbh->errstr;
+ $sth->execute()
+ 		or die "Couldn't execute statement: " . $sth->errstr;
+ print "Number of pre existing rows = " . $sth->rows . " \n";
+ my $final_row_count = $sth->rows;
+ print "waiting for replication to complete\n";
+ while ($final_row_count > $initial_row_count)
+  {
+   sleep 2;
+   print ". ";
+ $sth->execute();
+ $final_row_count = $sth->rows;
+  }
+
+  print "\n\nREPLICATION SUCCESSFUL! MOGILE DATA LOADING COMPLETE!\n\n";
+
+
+
+
+#**************HELPER FUNCTIONS****************#
+#**************FROM HERE***********************#
+
+
+
+
+#----To load a file into MogileFS----#
+#----Call method like:
+#    load_file($data_string, $key);
+#--------#
+ sub load_file
+  {
+   if ($#_ < 1) {print "load_file: wrong # of arguments"; return;}
+   my $data_string = $_[0];
+   my $key = $_[1];
+   $mogc = MogileFS::Client->new(domain => $mogile_domain,
+                               hosts  => [$mogile_tracker_host]);
+   $fh = $mogc->new_file($key, $class);
+   print $fh $data_string;
+   unless ($fh->close) {
+    die "Error writing file: " . $mogc->errcode . ": " . $mogc->errstr;
+   }
+   @urls = $mogc->get_paths($key);
+   print "\n urls = " . @urls[0] . "\n";
+
+  }
+
+
+
+
+#----generate the storage key----#
+  sub gen_key()
+  {
+   if ($#_ < 3) {print "gen_key: wrong # of arguments"; return;}
+   my $prefix = $_[0];
+   my $body = $_[1];
+   my $suffix = $_[2];
+   my $extn = $_[3];
+   my $key = $prefix . $body . $suffix . "." . $extn;
+   return $key;
+ }
+
+
+
+
+#----read an event file into a string----#
+  sub read_file()
+  {
+  if ($#_ < 0) {print "read_file: wrong # of arguments"; return;}
+  my $file_name = $_[0];
+  open(IN, "< $file_name");
+  binmode(IN);
+  while (read(IN, $b,1)) {
+   $data = $data . $b;
+   }
+  close(IN);
+  return $data;
+  }
+
+
+
+
+#----test write the data string----#
+  sub test_write()
+  {
+  }
+
+
+
+#----mogtool chunked load----#
+sub load()
+  {
+
+   if ($#_ < 1) {print "load_file: wrong # of arguments"; return;}
+   my $src = $_[0];
+   my $key = $_[1];
+
+
+abortWithUsage() unless
+    GetOptions(
+               # general purpose options
+               'trackers=s'    => \$opts{trackers},
+               'domain=s'      => \$opts{domain},
+               'class=s'       => \$opts{class},
+               'config=s'      => \$opts{conf},
+               'help'          => \$opts{help},
+               'debug'         => \$MogileFS::DEBUG,
+               'lib'           => \$opts{lib},
+
+               # extract+inject options
+               'gzip|z'        => \$opts{gzip},
+               'bigfile|b'     => \$opts{big},
+
+               # inject options
+               'overwrite'     => \$opts{overwrite},
+               'chunksize=s'   => \$opts{chunksize},
+               'receipt=s'     => \$opts{receipt},
+               'reciept=s'     => \$opts{receipt}, # requested :)
+               'verify'        => \$opts{verify},
+               'description=s' => \$opts{des},
+               'concurrent=i'  => \$opts{concurrent},
+
+               # extract options
+               'asfile'        => \$opts{asfile},
+               );
+
+# now load the config file?
+my @confs = ( $opts{conf}, "$ENV{HOME}/.mogtool", "/etc/mogilefs/mogtool.conf" );
+foreach my $conf (@confs) {
+    next unless $conf && -e $conf;
+    open FILE, "<$conf";
+    foreach (<FILE>) {
+        s!#.*!!;
+        next unless m!(\w+)\s*=\s*(.+)!;
+        $opts{$1} = $2;
+    }
+    close FILE;
+}
+
+# now bring in MogileFS, because hopefully we have a lib by now
+if ($opts{lib}) {
+    eval "use lib '$opts{lib}';";
+}
+
+# no trackers and domain..?
+unless ($opts{trackers} && $opts{domain}) {
+    abortWithUsage();
+}
+
+eval qq{
+    use MogileFS::Client; 1
+} or die "Failed to load MogileFS::Client module: $@\n";
+
+# init connection to mogile
+my $mogfs = get_mogfs();
+
+# get our command and pass off to our functions
+#my $cmd = shift;
+my $cmd = "inject";
+inject($src, $key) if $cmd eq 'i' || $cmd eq "inject";
+#extract() if $cmd eq 'x' || $cmd eq "extract";
+#list() if $cmd eq 'ls' || $cmd eq "list";
+#listkey() if $cmd eq 'lsk' || $cmd eq "listkey";
+#mdelete() if $cmd eq 'rm' || $cmd eq "delete";
+return;
+  }
+
+
+sub get_mogfs {
+    my @trackerinput = split(/\s*,\s*/, $opts{trackers});
+    my @trackers;
+    my %pref_ip;
+    foreach my $tracker (@trackerinput) {
+        if ($tracker =~ m!(.+)/(.+):(\d+)!) {
+            $pref_ip{$2} = $1;
+            push @trackers, "$2:$3";
+        } else {
+            push @trackers, $tracker;
+        }
+    }
+
+    my $mogfs = MogileFS::Client->new(
+                              domain => $opts{domain},
+                              hosts  => \@trackers,
+                              )
+            or error("Could not initialize MogileFS", ERR_FATAL);
+    $mogfs->set_pref_ip(\%pref_ip);
+    return $mogfs;
+}
+
+sub error {
+    my $err = shift() || "ERROR: no error message provided!";
+    print STDERR "$err\n";
+
+    if (my $errstr = $mogfs->errstr) {
+        $errstr =~ s/^\s+//;
+        $errstr =~ s/\s+$//;
+        if ($errstr) {
+            print STDERR "MogileFS backend error message: $errstr\n";
+        }
+    }
+
+    if ($@) {
+        my $err = $@;
+        $err =~ s/[\r\n]+$//;
+        print STDERR "System error message: $@\n";
+    }
+
+    # if a second argument, exit
+    if (defined (my $exitcode = shift())) {
+        exit $exitcode+0;
+    }
+}
+
+sub inject {
+   if ($#_ < 1) {print "load_file: wrong # of arguments"; return;}
+   my $src = $_[0];
+   my $key = $_[1];
+
+
+    abortWithUsage() unless $src && $key;
+
+    # make sure the source exists and the key is valid
+    die "Error: source $src doesn't exist.\n"
+        unless -e $src;
+    die "Error: key $key isn't valid; must not contain spaces or commas.\n"
+        unless $key =~ /^[^\s\,]+$/;
+
+    # before we get too far, find sendmail?
+    my $sendmail;
+    if ($opts{receipt}) {
+        $sendmail = `which sendmail` || '/usr/sbin/sendmail';
+        $sendmail =~ s/[\r\n]+$//;
+        unless (-e $sendmail) {
+            die "Error: attempted to find sendmail binary in /usr/sbin but couldn't.\n";
+        }
+    }
+
+    # open up O as the handle to use for reading data
+    my $type = 'unknown';
+    if (-d $src) {
+        my $taropts = ($opts{gzip} ? 'z' : '') . "cf";
+        $type = 'tarball';
+        open (O, '-|', 'tar', $taropts, '-', $src)
+            or die "Couldn't open tar for reading: $!\n";
+    } elsif (-f $src) {
+        $type = 'file';
+        open (O, "<$src")
+            or die "Couldn't open file for reading: $!\n";
+    } elsif (-b $src) {
+        $type = 'partition';
+        open (O, "<$src")
+            or die "Couldn't open block device for reading: $!\n";
+    } else {
+        die "Error: not file, directory, or partition.\n";
+    }
+
+    # now do some pre-file checking...
+    my $size = -s $src;
+    if ($type ne 'file') {
+        die "Error: you specified to store a file of type $type but didn't specify --bigfile.  Please see documentation.\n"
+            unless $opts{big};
+    } elsif ($size > 64 * 1024 * 1024) {
+        die "Error: the file is more than 64MB and you didn't specify --bigfile.  Please see documentation.\n"
+            unless $opts{big};
+    }
+
+    # see if there's already a pre file?
+    if ($opts{big}) {
+        my $data = $mogfs->get_file_data("_big_pre:$key");
+        if (defined $data) {
+            unless ($opts{overwrite}) {
+                error(<<MSG, ERR_FATAL);
+ERROR: The pre-insert file for $key exists.  This indicates that a previous
+attempt to inject a file failed--or is still running elsewhere!  Please
+verify that a previous injection of this file is finished, or run mogtool
+again with the --overwrite inject option.
+
+$$data
+MSG
+            }
+
+            # delete the pre notice since we didn't die (overwrite must be on)
+            $mogfs->delete("_big_pre:$key")
+                or error("ERROR: Unable to delete _big_pre:$key.", ERR_FATAL);
+        }
+
+        # now create our pre notice
+        my $prefh = $mogfs->new_file("_big_pre:$key", $opts{class})
+            or error("ERROR: Unable to create _big_pre:$key.", ERR_FATAL);
+        $prefh->print("starttime:" . time());
+        $prefh->close()
+            or error("ERROR: Unable to save to _big_pre:$key.", ERR_FATAL);
+    }
+
+    # setup config and temporary variables we're going to be using
+    my $chunk_size = 64 * 1024 * 1024;  # 64 MB
+    if ($opts{big}) {
+        if ($opts{chunksize} && ($opts{chunksize} =~ m!^(\d+)(G|M|K|B)?!i)) {
+            $chunk_size = $1;
+            unless (lc $2 eq 'b') {
+                $chunk_size *= (1024 ** ( { g => 3, m => 2, k => 1 }->{lc $2} || 2 ));
+            }
+            print "NOTE: Using chunksize of $chunk_size bytes.\n";
+        }
+    }
+    my $read_size = ($chunk_size > 1024*1024 ? 1024*1024 : $chunk_size);
+
+    # temporary variables
+    my $buf;
+    my $bufsize = 0;
+    my $chunknum = 0;
+    my %chunkinfo; # { id => [ md5, length ] }
+    my %chunkbuf; # { id => data }
+    my %children; # { pid => chunknum }
+    my %chunksout; # { chunknum => pid }
+
+    # this function writes out a chunk
+    my $emit = sub {
+        my $cn = shift() + 0;
+        return unless $cn;
+
+        # get the length of the chunk we're going to send
+        my $bufsize = length $chunkbuf{$cn};
+        return unless $bufsize;
+
+        # now spawn off a child to do the real work
+        if (my $pid = fork()) {
+            print "Spawned child $pid to deal with chunk number $cn.\n";
+            $chunksout{$cn} = $pid;
+            $children{$pid} = $cn;
+            return;
+        }
+
+        # drop other memory references we're not using anymore
+        foreach my $chunknum (keys %chunkbuf) {
+            next if $chunknum == $cn;
+            delete $chunkbuf{$chunknum};
+        }
+
+        # as a child, get a new mogile connection
+        my $mogfs = get_mogfs();
+        my $dkey = $opts{big} ? "$key,$chunknum" : "$key";
+
+        # TODO: be resilient to transient errors, retry, etc.
+        my $start_time = [ gettimeofday() ];
+        my $try = 0;
+        while (1) {
+            $try++;
+            my $fh = $mogfs->new_file($dkey, $opts{class}, $bufsize);
+            unless (defined $fh) {
+                error("WARNING: Unable to create new file '$dkey'.");
+                printf "This was try #$try and it's been %.2f seconds since we first tried.  Retrying...\n", tv_interval($start_time);
+                sleep 1;
+                next;
+            }
+            $fh->print($chunkbuf{$cn});
+            unless ($fh->close) {
+                error("WARNING: Unable to save file '$dkey'.");
+                printf "This was try #$try and it's been %.2f seconds since we first tried.  Retrying...\n", tv_interval($start_time);
+                sleep 1;
+                next;
+            }
+            last;
+        }
+        my $diff = tv_interval($start_time);
+        printf "        chunk $cn saved in %.2f seconds.\n", $diff;
+
+        # make sure we never return, always exit
+        exit 0;
+    };
+
+    # just used to reap our children in a loop until they're done.  also
+    # handles respawning a child that failed.
+    my $reap_children = sub {
+        # find out if we have any kids dead
+        while ((my $pid = waitpid -1, WNOHANG) > 0) {
+            my $cnum = delete $children{$pid};
+            unless ($cnum) {
+                print "Error: reaped child $pid, but no idea what they were doing...\n";
+                next;
+            }
+            if (my $status = $?) {
+                print "Error: reaped child $pid for chunk $cnum returned non-zero status... Retrying...\n";
+                $emit->($cnum);
+                next;
+            }
+#            my @paths = grep { defined $_ } $mogfs->get_paths($opts{big} ? "$key,$cnum" : "$key", 1);
+#            unless (@paths) {
+#                print "Error: reaped child $pid for chunk $cnum but no paths exist... Retrying...\n";
+#                $emit->($cnum);
+#                next;
+#            }
+            delete $chunkbuf{$cnum};
+            delete $chunksout{$cnum};
+            print "Child $pid successfully finished with chunk $cnum.\n";
+        }
+    };
+
+    # this function handles parallel threads
+    $opts{concurrent} ||= 1;
+    $opts{concurrent} = 1 if $opts{concurrent} < 1;
+    my $handle_children = sub {
+        # here we pause while our children are working
+        my $first = 1;
+        while ($first || scalar(keys %children) >= $opts{concurrent}) {
+            $first = 0;
+            $reap_children->();
+            select undef, undef, undef, 0.1;
+        }
+
+        # now spawn until we hit the limit
+        foreach my $cnum (keys %chunkbuf) {
+            next if $chunksout{$cnum};
+            $emit->($cnum);
+            last if scalar(keys %children) >= $opts{concurrent};
+        }
+    };
+
+    # setup compression stuff
+    my $dogzip = 0;
+    my $zlib;
+    if ($opts{gzip}) {
+        # if they turned gzip on we may or may not need this stream, so make it
+        $zlib = deflateInit()
+            or error("Error: unable to create gzip deflation stream", ERR_FATAL);
+    }
+
+    # read one meg chunks while we have data
+    my $sum = 0;
+    my $readbuf = '';
+    while (my $rv = read(O, $readbuf, $read_size)) {
+        # if this is a file, and this is our first read, see if it's gzipped
+        if (!$sum && $rv >= 2) {
+            if (substr($readbuf, 0, 2) eq "\x1f\x8b") {
+                # this is already gzipped, so just mark it as such and insert it
+                $opts{gzip} = 1;
+            } else {
+                # now turn on our gzipping if the user wants the output gzipped
+                $dogzip = 1 if $opts{gzip};
+            }
+        }
+
+        # now run it through the deflation stream before we process it here
+        if ($dogzip) {
+            my ($out, $status) = $zlib->deflate($readbuf);
+            error("Error: Deflation failure processing stream", ERR_FATAL)
+                unless $status == Z_OK;
+            $readbuf = $out;
+            $rv = length $readbuf;
+
+            # we don't always get a chunk from deflate
+            next unless $rv;
+        }
+
+        # now stick our data into our real buffer
+        $buf .= $readbuf;
+        $bufsize += $rv;
+        $sum += $rv;
+        $readbuf = '';
+
+        # generate output
+        if ($type ne 'tarball' && $size && $size > $read_size) {
+            printf "Buffer so far: $bufsize bytes [%.2f%% complete]\r", ($sum / $size * 100);
+        } else {
+            print "Buffer so far: $bufsize bytes\r";
+        }
+
+        # if we have one chunk, handle it
+        if ($bufsize >= $chunk_size) {
+            $chunkbuf{++$chunknum} = substr($buf, 0, $chunk_size);
+
+            # calculate the md5, print out status, and save this chunk
+            my $md5 = md5_hex($buf);
+            if ($opts{big}) {
+                print "chunk $key,$chunknum: $md5, len = $chunk_size\n";
+            } else {
+                print "file $key: $md5, len = $chunk_size\n";
+            }
+            $chunkinfo{$chunknum} = [ $md5, $chunk_size ];
+
+            # reset for the next read loop
+            $buf = substr($buf, $chunk_size);
+            $bufsize = length $buf;
+
+            # now spawn children to save chunks
+            $handle_children->();
+        }
+    }
+    close O;
+
+    # now we need to flush the gzip engine
+    if ($dogzip) {
+        my ($out, $status) = $zlib->flush;
+        error("Error: Deflation failure processing stream", ERR_FATAL)
+            unless $status == Z_OK;
+        $buf .= $out;
+        $bufsize += length $out;
+        $sum += length $out;
+    }
+
+    # final piece
+    if ($buf) {
+        $chunkbuf{++$chunknum} = $buf;
+        my $md5 = md5_hex($buf);
+        if ($opts{big}) {
+            print "chunk $key,$chunknum: $md5, len = $bufsize\n";
+        } else {
+            print "file $key: $md5, len = $bufsize\n";
+        }
+        $chunkinfo{$chunknum} = [ $md5, $bufsize ];
+    }
+
+    # now, while we still have chunks to process...
+    while (%chunkbuf) {
+        $handle_children->();
+        sleep 1;
+    }
+
+    # verify replication and chunks
+    # not any more.
+    my %paths; # { chunknum => [ path, path, path ... ] }
+    my %still_need = ( %chunkinfo );
+
+
+    # prepare the info file
+    my $des = $opts{des} || 'no description';
+    my $compressed = $opts{gzip} ? '1' : '0';
+    #FIXME: add 'partblocks' to info file
+
+    # create the info file
+    my $info = <<INFO;
+des $des
+type $type
+compressed $compressed
+filename $src
+chunks $chunknum
+size $sum
+
+INFO
+    foreach (sort { $a <=> $b } keys %chunkinfo) {
+        $info .= "part $_ bytes=$chunkinfo{$_}->[1] md5=$chunkinfo{$_}->[0] paths: ";
+        $info .= join(', ', @{$paths{$_} || []});
+        $info .= "\n";
+    }
+
+    # now write out the info file
+    if ($opts{big}) {
+        my $fhinfo = $mogfs->new_file("_big_info:$key", $opts{class})
+            or error("ERROR: Unable to create _big_info:$key.", ERR_FATAL);
+        $fhinfo->print($info);
+        $fhinfo->close()
+            or error("ERROR: Unable to save _big_info:$key.", ERR_FATAL);
+
+        # verify info file
+        print "Waiting for info file replication...\n";
+        while (1) {
+            my @paths = $mogfs->get_paths("_big_info:$key", 1);
+            next unless scalar(@paths) >= 2;
+            foreach my $path (@paths) {
+                my $data = get($path);
+                error("       FATAL: content mismatch on $path", ERR_FATAL)
+                    unless $data eq $info;
+            }
+            last;
+        }
+
+        # now delete our pre file
+        print "Deleting pre-insert file...\n";
+        $mogfs->delete("_big_pre:$key")
+            or error("ERROR: Unable to delete _big_pre:$key", ERR_FATAL);
+    }
+
+    # now email and save a receipt
+    if ($opts{receipt}) {
+        open MAIL, "| $sendmail -t"
+            or error("ERROR: Unable to open sendmail binary: $sendmail", ERR_FATAL);
+        print MAIL <<MAIL;
+To: $opts{receipt}
+From: mogtool\@dev.null
+Subject: mogtool.$key.receipt
+
+$info
+.
+MAIL
+        close MAIL;
+        print "Receipt emailed.\n";
+
+        # now dump to a file
+        open FILE, ">mogtool.$key.receipt"
+            or error("ERROR: Unable to create file mogtool.$key.receipt in current directory.", ERR_FATAL);
+        print FILE $info;
+        close FILE;
+        print "Receipt stored in mogtool.$key.receipt.\n";
+    }
+}
+
+# Parse the contents of a "_big_info:<key>" metadata file into a hashref:
+#   des, type, compressed, filename, chunks, size, maxnum, and
+#   parts => { <n> => { bytes, md5, paths => [...] } }.
+# Fields missing from the info file come back undef.
+sub _parse_info {
+    my $info = shift;
+    my $res = {};
+
+    # parse out the header data
+    $res->{des} = ($info =~ /^des\s+(.+)$/m) ? $1 : undef;
+    $res->{type} = ($info =~ /^type\s+(.+)$/m) ? $1 : undef;
+    $res->{compressed} = ($info =~ /^compressed\s+(.+)$/m) ? $1 : undef;
+    # BUGFIX: this previously read "$res->(unknown) = ...", which treats the
+    # hashref as a code ref and dies at runtime.  The info-file writer emits a
+    # "filename <src>" header, so store the value under the "filename" key.
+    $res->{filename} = ($info =~ /^filename\s+(.+)$/m) ? $1 : undef;
+    $res->{chunks} = ($info =~ /^chunks\s+(\d+)$/m) ? $1 : undef;
+    $res->{size} = ($info =~ /^size\s+(\d+)$/m) ? $1 : undef;
+
+    # now get the pieces
+    $res->{maxnum} = undef;
+    while ($info =~ /^part\s+(\d+)\s+bytes=(\d+)\s+md5=(.+)\s+paths:\s+(.+)$/mg) {
+        $res->{maxnum} = $1 if !defined $res->{maxnum} || $1 > $res->{maxnum};
+        $res->{parts}->{$1} = {
+            bytes => $2,
+            md5 => $3,
+            paths => [ split(/\s*,\s*/, $4) ],
+        };
+    }
+
+    return $res;
+}
+
+# extract <key> <dest>: pull a stored file back out of MogileFS.
+# <dest> may be "-" (stdout), "." (use the stored filename, or untar into the
+# current directory for tarballs), a plain filename, or a block device.
+# For --bigfile keys each chunk's length and MD5 are verified before writing.
+sub extract {
+    my $key = shift @ARGV;
+    my $dest = shift @ARGV;
+    abortWithUsage() unless $key && $dest;
+
+    error("Error: key $key isn't valid; must not contain spaces or commas.", ERR_FATAL)
+        unless $key =~ /^[^\s\,]+$/;
+    unless ($dest eq '-' || $dest eq '.') {
+        error("Error: destination exists: $dest (specify --overwrite if you want to kill it)", ERR_FATAL)
+            if -e $dest && !$opts{overwrite} && !-b $dest;
+    }
+
+    # see if this is really a big file
+    my $file;
+    if ($opts{big}) {
+        my $info = $mogfs->get_file_data("_big_info:$key");
+        die "$key doesn't seem to be a valid big file.\n"
+            unless $info && $$info;
+
+        # verify validity
+        $file = _parse_info($$info);
+
+        # make sure we have enough info
+        error("Error: info file doesn't contain the number of chunks", ERR_FATAL)
+            unless $file->{chunks};
+        error("Error: info file doesn't contain the total size", ERR_FATAL)
+            unless $file->{size};
+
+    } else {
+        # not a big file, so it has to be of a certain type
+        $file->{type} = 'file';
+        $file->{maxnum} = 1;
+        $file->{parts}->{1} = {
+            paths => [ grep { defined $_ } $mogfs->get_paths($key) ],
+        };
+
+        # now, if it doesn't exist..
+        unless (scalar(@{$file->{parts}->{1}->{paths}})) {
+            error("Error: file doesn't exist (or did you forget --bigfile?)", ERR_FATAL);
+        }
+    }
+
+    # several cases.. going to stdout?
+    if ($dest eq '-') {
+        *O = *STDOUT;
+    } else {
+        # open up O as the handle to use for reading data
+        if ($file->{type} eq 'file' || $file->{type} eq 'partition' ||
+            ($file->{type} eq 'tarball' && $opts{asfile})) {
+            # just write it to the file with this name, but don't overwrite?
+            if ($dest eq '.') {
+                # BUGFIX: this previously read "$dest = $file->(unknown);",
+                # which treats the hashref as a code ref and dies at runtime.
+                # The info file records the original path under "filename".
+                $dest = $file->{filename};
+                error("Error: no filename recorded for $key; give an explicit destination.", ERR_FATAL)
+                    unless defined $dest && length $dest;
+                $dest =~ s!^(.+)/!!;   # strip any leading directory components
+            }
+            if (-b $dest) {
+                # if we're targetting a block device...
+                warn "FIXME: add in block checking\n";
+                open O, ">$dest"
+                    or die "Couldn't open $dest: $!\n";
+            } elsif (-e $dest) {
+                if ($opts{overwrite}) {
+                    open O, ">$dest"
+                        or die "Couldn't open $dest: $!\n";
+                } else {
+                    die "File already exists: $dest ... won't overwrite without --overwrite.\n";
+                }
+            } else {
+                open O, ">$dest"
+                    or die "Couldn't open $dest: $!\n";
+            }
+
+        } elsif ($file->{type} eq 'tarball') {
+            my $taropts = ($file->{compressed} ? 'z' : '') . "xf";
+            open O, '|-', 'tar', $taropts, '-'
+                or die "Couldn't open tar for writing: $!\n";
+
+        } else {
+            die "Error: unable to handle type '$file->{type}'\n";
+        }
+    }
+
+    # start fetching pieces; try each replica path in turn until one verifies
+    foreach my $i (1..$file->{maxnum}) {
+        print "Fetching piece $i...\n";
+
+        foreach my $path (@{$file->{parts}->{$i}->{paths} || []}) {
+            print "        Trying $path...\n";
+            my $data = get($path);
+            next unless $data;
+
+            # now verify MD5, etc
+            if ($opts{big}) {
+                my $len = length $data;
+                my $md5 = md5_hex($data);
+                print "                ($len bytes, $md5)\n";
+                next unless $len == $file->{parts}->{$i}->{bytes} &&
+                            $md5 eq $file->{parts}->{$i}->{md5};
+            }
+
+            # this chunk verified, write it out
+            print O $data;
+            last;
+        }
+    }
+
+    # at this point the file should be complete!
+    close O;
+    print "Done.\n";
+
+    # now make sure we have enough data
+#$ mogtool [opts] extract <key> {<file>,<dir>,<device>}
+                                 #=>  -  (for stdout)    (if compressed, add "z" flag)
+                                 #=>  .   (to untar)     (if compressed, do nothing???, make .tar.gz file -- unless they use -z again?)
+                                 #=> /dev/sda4  (but check /proc/partitions that it's big enough)  (if compress, Compress::Zlib to ungzip
+#                                 => foo.jpg  (write it to a file)
+
+
+    # now check
+    exit 0;
+}
+
+# list: print the user-visible key of every "big file" stored in MogileFS
+# (i.e. every key carrying the "_big_info:" prefix), then a count.
+sub list {
+    # list all big files in mogile
+    my ($after, $list);
+    my $ct = 0;   # BUGFIX: was left undefined, producing an uninitialized-value
+                  # warning in the summary line when no big files exist
+    while (($after, $list) = $mogfs->list_keys("_big_info:", $after)) {
+        last unless $list && @$list;
+
+        # now extract the key and dump it
+        foreach my $key (@$list) {
+            next unless $key =~ /^_big_info:(.+)$/;
+
+            $key = $1;
+            $ct++;
+
+            print "$key\n";
+        }
+    }
+    print "#$ct files found\n";
+    exit 0;
+}
+
+# listkey <pattern>: print every MogileFS key matching the given key prefix,
+# then a count.
+sub listkey {
+
+    my $key_pattern = shift(@ARGV);
+    abortWithUsage() unless $key_pattern;
+
+    # list all files matching a key
+    my ($after, $list);
+    my $ct = 0;   # BUGFIX: was left undefined, producing an uninitialized-value
+                  # warning in the summary line when nothing matches
+    while (($after, $list) = $mogfs->list_keys("$key_pattern", $after)) {
+        last unless $list && @$list;
+
+        # now extract the key and dump it
+        foreach my $key (@$list) {
+
+            $ct++;
+
+            print "$key\n";
+        }
+    }
+    print "#$ct files found\n";
+    exit 0;
+}
+
+# mdelete <key>: delete a key from MogileFS.  Without --big this is a single
+# key delete; with --big it removes every chunk key "<key>,<n>" and then the
+# "_big_info:<key>" metadata file.
+sub mdelete {
+    my $key = shift(@ARGV);
+    abortWithUsage() unless $key;
+
+    # delete simple file
+    unless ($opts{big}) {
+        my $rv = $mogfs->delete($key);
+        error("Failed to delete: $key.", ERR_FATAL)
+            unless $rv;
+        print "Deleted.\n";
+        exit 0;
+    }
+
+    # delete big file
+    my $info = $mogfs->get_file_data("_big_info:$key");
+    error("$key doesn't seem to be a valid big file.", ERR_FATAL)
+        unless $info && $$info;
+
+    # verify validity
+    my $file = _parse_info($$info);
+
+    # make sure we have enough info to delete
+    error("Error: info file doesn't contain required information?", ERR_FATAL)
+        unless $file->{chunks} && $file->{maxnum};
+
+    # now delete each chunk, best attempt
+    # (return values are deliberately ignored: a missing chunk should not
+    # abort deletion of the remaining ones)
+    foreach my $i (1..$file->{maxnum}) {
+        $mogfs->delete("$key,$i");
+    }
+
+    # delete the main pieces
+    my $rv = $mogfs->delete("_big_info:$key");
+    error("Unable to delete _big_info:$key.", ERR_FATAL)
+        unless $rv;
+    print "Deleted.\n";
+    exit 0;
+}
+
+# bail out with the usage text when --help was requested
+abortWithUsage() if $opts{help};
+
+
+# Print the POD usage summary via pod2usage, optionally preceded by an error
+# message, and exit with status 1.  Never returns.
+sub abortWithUsage {
+    my $msg = join '', @_;
+
+    if ( $msg ) {
+        pod2usage( -verbose => 1, -exitval => 1, -message => "$msg" );
+    } else {
+        pod2usage( -verbose => 1, -exitval => 1 );
+    }
+}
+
+
+
+
+

Propchange: incubator/olio/workload/rails/trunk/bin/loader.pl
------------------------------------------------------------------------------
    svn:executable = *

Added: incubator/olio/workload/rails/trunk/bin/schema.sql
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/bin/schema.sql?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/bin/schema.sql (added)
+++ incubator/olio/workload/rails/trunk/bin/schema.sql Mon Oct 20 10:39:16 2008
@@ -0,0 +1,195 @@
+-- Olio (Web 2.0 workload) schema: MySQL/InnoDB tables for the Rails driver.
+-- addresses: postal address plus optional geocode; referenced by users/events.
+CREATE TABLE `addresses` (
+  `id` int(11) NOT NULL auto_increment,
+  `street1` varchar(55) default NULL,
+  `street2` varchar(55) default NULL,
+  `city` varchar(55) default NULL,
+  `state` varchar(25) default NULL,
+  `zip` varchar(12) default NULL,
+  `country` varchar(55) default NULL,
+  `latitude` decimal(14,10) default NULL,
+  `longitude` decimal(14,10) default NULL,
+  PRIMARY KEY  (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- comments: a user's free-text comment and rating on an event.
+CREATE TABLE `comments` (
+  `id` int(11) NOT NULL auto_increment,
+  `user_id` int(11) default NULL,
+  `event_id` int(11) default NULL,
+  `rating` int(11) default NULL,
+  `comment` text,
+  `created_at` datetime default NULL,
+  `updated_at` datetime default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_comments_on_event_id` (`event_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- documents: metadata (size/type/name) for uploaded attachments.
+CREATE TABLE `documents` (
+  `id` int(11) NOT NULL auto_increment,
+  `size` int(11) default NULL,
+  `content_type` varchar(255) default NULL,
+  `filename` varchar(255) default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_documents_on_filename` (`filename`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- events: the central entity -- an event with owner, address, media
+-- attachments, schedule fields, and aggregate rating counters.
+CREATE TABLE `events` (
+  `id` int(11) NOT NULL auto_increment,
+  `title` varchar(100) default NULL,
+  `description` varchar(500) default NULL,
+  `telephone` varchar(20) default NULL,
+  `user_id` int(11) default NULL,
+  `address_id` int(11) default NULL,
+  `image_id` int(11) default NULL,
+  `document_id` int(11) default NULL,
+  `event_timestamp` datetime default NULL,
+  `event_date` date default NULL,
+  `created_at` datetime default NULL,
+  `total_score` int(11) default NULL,
+  `num_votes` int(11) default NULL,
+  `disabled` tinyint(1) default NULL,
+  `thumbnail` int(11) default NULL,
+  `summary` varchar(100) default NULL,
+  PRIMARY KEY  (`id`),
+  -- NOTE(review): the KEY below duplicates the PRIMARY KEY on `id`; it adds
+  -- write/maintenance overhead with no lookup benefit -- consider dropping.
+  KEY `index_events_on_id` (`id`),
+  KEY `index_events_on_event_date` (`event_date`),
+  KEY `index_events_on_event_timestamp` (`event_timestamp`),
+  KEY `index_events_on_created_at` (`created_at`),
+  KEY `index_events_on_user_id` (`user_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- events_users: many-to-many join of attendees to events (no surrogate key).
+CREATE TABLE `events_users` (
+  `event_id` int(11) default NULL,
+  `user_id` int(11) default NULL,
+  KEY `index_events_users_on_event_id` (`event_id`),
+  KEY `index_events_users_on_user_id` (`user_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- geolocations: zip-code -> city/state/lat-long lookup table.
+-- AUTO_INCREMENT=29357 reflects the preloaded row count in the dump.
+CREATE TABLE `geolocations` (
+  `id` int(11) NOT NULL auto_increment,
+  `zip` int(11) default NULL,
+  `state_code` varchar(255) default NULL,
+  `state` varchar(255) default NULL,
+  `city` varchar(255) default NULL,
+  `longitude` float default NULL,
+  `latitude` float default NULL,
+  `created_at` datetime default NULL,
+  `updated_at` datetime default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_geolocations_on_zip` (`zip`)
+) ENGINE=InnoDB AUTO_INCREMENT=29357 DEFAULT CHARSET=latin1;
+
+-- images: upload metadata incl. dimensions; parent_id/thumbnail support
+-- derived (resized) versions of a base image.
+CREATE TABLE `images` (
+  `id` int(11) NOT NULL auto_increment,
+  `size` int(11) default NULL,
+  `content_type` varchar(255) default NULL,
+  `filename` varchar(255) default NULL,
+  `height` int(11) default NULL,
+  `width` int(11) default NULL,
+  `parent_id` int(11) default NULL,
+  `thumbnail` varchar(255) default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_images_on_filename` (`filename`),
+  KEY `index_images_on_thumbnail` (`thumbnail`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- invites: friendship requests from user_id to user_id_target.
+CREATE TABLE `invites` (
+  `id` int(11) NOT NULL auto_increment,
+  `user_id` int(11) NOT NULL,
+  `user_id_target` int(11) NOT NULL,
+  `is_accepted` tinyint(1) default '0',
+  PRIMARY KEY  (`id`),
+  KEY `index_invites_on_user_id` (`user_id`),
+  KEY `index_invites_on_user_id_target` (`user_id_target`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- schema_migrations: Rails migration bookkeeping (versions applied).
+CREATE TABLE `schema_migrations` (
+  `version` varchar(255) NOT NULL,
+  UNIQUE KEY `unique_schema_migrations` (`version`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- taggings: polymorphic join (taggable_type/taggable_id) attaching tags to
+-- arbitrary records, in acts_as_taggable style.
+CREATE TABLE `taggings` (
+  `id` int(11) NOT NULL auto_increment,
+  `tag_id` int(11) default NULL,
+  `taggable_id` int(11) default NULL,
+  `taggable_type` varchar(255) default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_taggings_on_tag_id_and_taggable_id_and_taggable_type` (`tag_id`,`taggable_id`,`taggable_type`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- tags: tag vocabulary.
+CREATE TABLE `tags` (
+  `id` int(11) NOT NULL auto_increment,
+  `name` varchar(255) default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_tags_on_name` (`name`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- users: benchmark user accounts.
+-- NOTE(review): `password` is stored in plain text; acceptable only because
+-- this is synthetic benchmark data.
+CREATE TABLE `users` (
+  `id` int(11) NOT NULL auto_increment,
+  `username` varchar(25) default NULL,
+  `password` varchar(25) default NULL,
+  `firstname` varchar(25) default NULL,
+  `lastname` varchar(25) default NULL,
+  `email` varchar(90) default NULL,
+  `telephone` varchar(25) default NULL,
+  `summary` varchar(2500) default NULL,
+  `timezone` varchar(25) default NULL,
+  `created_at` datetime default NULL,
+  `updated_at` datetime default NULL,
+  `address_id` int(11) default NULL,
+  `image_id` int(11) default NULL,
+  `thumbnail` int(11) default NULL,
+  PRIMARY KEY  (`id`),
+  KEY `index_users_on_username` (`username`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Seed rows: mark Rails migrations 1 through 26 as already applied so
+-- `rake db:migrate` treats this dumped schema as current.  (Rows appear in
+-- string-sorted order, as produced by the dump tool.)
+INSERT INTO schema_migrations (version) VALUES ('1');
+
+INSERT INTO schema_migrations (version) VALUES ('10');
+
+INSERT INTO schema_migrations (version) VALUES ('11');
+
+INSERT INTO schema_migrations (version) VALUES ('12');
+
+INSERT INTO schema_migrations (version) VALUES ('13');
+
+INSERT INTO schema_migrations (version) VALUES ('14');
+
+INSERT INTO schema_migrations (version) VALUES ('15');
+
+INSERT INTO schema_migrations (version) VALUES ('16');
+
+INSERT INTO schema_migrations (version) VALUES ('17');
+
+INSERT INTO schema_migrations (version) VALUES ('18');
+
+INSERT INTO schema_migrations (version) VALUES ('19');
+
+INSERT INTO schema_migrations (version) VALUES ('2');
+
+INSERT INTO schema_migrations (version) VALUES ('20');
+
+INSERT INTO schema_migrations (version) VALUES ('21');
+
+INSERT INTO schema_migrations (version) VALUES ('22');
+
+INSERT INTO schema_migrations (version) VALUES ('23');
+
+INSERT INTO schema_migrations (version) VALUES ('24');
+
+INSERT INTO schema_migrations (version) VALUES ('25');
+
+INSERT INTO schema_migrations (version) VALUES ('26');
+
+INSERT INTO schema_migrations (version) VALUES ('3');
+
+INSERT INTO schema_migrations (version) VALUES ('4');
+
+INSERT INTO schema_migrations (version) VALUES ('5');
+
+INSERT INTO schema_migrations (version) VALUES ('6');
+
+INSERT INTO schema_migrations (version) VALUES ('7');
+
+INSERT INTO schema_migrations (version) VALUES ('8');
+
+INSERT INTO schema_migrations (version) VALUES ('9');

Added: incubator/olio/workload/rails/trunk/build.properties.template
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/build.properties.template?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/build.properties.template (added)
+++ incubator/olio/workload/rails/trunk/build.properties.template Mon Oct 20 10:39:16 2008
@@ -0,0 +1,7 @@
+# Template for build.properties -- copy and edit before building/deploying.
+# bench.shortname names the generated jar; faban.home/faban.url locate the
+# local Faban install and the harness to deploy to.
+bench.shortname=Web20Driver
+faban.home=/Users/will/projects/RadLab/new/
+faban.url=http://dewberry.sfbay.sun.com:9980/
+deploy.user=deployer
+# NOTE(review): placeholder credentials -- change before deploying for real.
+deploy.password=adminadmin
+deploy.clearconfig=false
+compiler.target.version=1.5

Added: incubator/olio/workload/rails/trunk/build.xml
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/build.xml?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/build.xml (added)
+++ incubator/olio/workload/rails/trunk/build.xml Mon Oct 20 10:39:16 2008
@@ -0,0 +1,130 @@
+<project name="Faban Benchmark" default="deploy.jar" basedir=".">
+    <!-- Ant build for the Olio Rails workload driver on the Faban harness.
+         Settings come from build.properties (see build.properties.template). -->
+
+    <property file="build.properties"/>
+    <property name="src.dir" value="src"/>
+    <property name="build.dir" value="build"/>
+    <property name="classes.dir" value="${build.dir}/classes"/>
+    <property name="lib.dir" value="lib"/>
+    <property name="buildlib.dir" value="${build.dir}/lib"/>
+    <property name="faban.libs" value="${faban.home}/lib"/>
+    <property name="faban.classes" value="${faban.home}/master/webapps/faban/WEB-INF/classes"/>
+
+    <!-- classpath used only to load the Faban DeployTask below -->
+    <path id="taskclasspath">
+        <fileset dir="${faban.home}/ant/lib" includes="*.jar"/>
+        <fileset dir="${faban.libs}" includes="*.jar"/>
+    </path>
+
+    <path id="classpath" location="${faban.libs}" >
+        <pathelement location="${classes.dir}"/>
+        <fileset dir="${lib.dir}" includes="*.jar"/>
+        <fileset dir="${faban.libs}" includes="*.jar"/>
+        <pathelement location="${faban.classes}"/>
+    </path>
+    
+    <taskdef name="deploy" classname="com.sun.faban.harness.util.DeployTask">
+        <classpath refid="taskclasspath"/>
+    </taskdef>
+
+    <target name="init">
+        <mkdir dir="${classes.dir}"/>
+    </target>
+
+    <target name="compile" depends="init" description="Compiling all source files">
+        <javac srcdir="${src.dir}"
+            deprecation="on" target="${compiler.target.version}"
+            destdir="${classes.dir}" debug="on">
+            <include name="**/*.java" />
+            <classpath refid="classpath"/>
+        </javac>
+    </target>
+
+    <target name="clean" description="cleanup module">
+      <delete>
+        <fileset dir="${build.dir}" includes="**/*"/>
+      </delete>
+    </target>
+
+
+    <target name="bench.jar" depends="compile" description="Assembles library jar with benchmark classes">
+        <mkdir dir="${buildlib.dir}"/>
+        <jar jarfile="${buildlib.dir}/${bench.shortname}.jar">
+            <fileset dir="${classes.dir}" includes="**/*.class"/>
+        </jar>
+    </target>
+
+    <target name="deploy.jar" depends="bench.jar"
+        description="Assembles deployment jar image for Faban harness">
+        <fixcrlf srcdir="bin"/> 
+        <jar jarfile="${build.dir}/${bench.shortname}.jar">
+            <metainf dir="deploy" includes="*"/>
+            <fileset dir="." includes="bin/**/*, lib/**/*, resources/**/*"/>
+            <fileset dir="${build.dir}" includes="lib/**/*"/>
+        </jar>
+    </target>
+
+    <target name="deploy" depends="deploy.jar"
+        description="Deploys benchmark on the Faban harness">
+        <deploy url="${faban.url}"
+            jar="${build.dir}/${bench.shortname}.jar"
+            user="${deploy.user}" password="${deploy.password}"
+            clearConfig="${deploy.clearconfig}"/>
+    </target>
+
+    <target name="run" depends="bench.jar" 
+        description="Test runs the benchmark outside the Faban Harness">
+        <java classname="com.sun.faban.driver.core.MasterImpl"
+              classpathref="classpath"
+              fork="true"
+              failonerror="true"
+              dir="config">
+              <jvmarg value="-XX:+DisableExplicitGC"/>
+              <jvmarg value="-Djava.security.policy=security/driver.policy"/>
+              <jvmarg value="-Djava.util.logging.config.file=logging.properties"/>
+              <!-- NOTE(review): with dir="config" this resolves to deploy/run.xml,
+                   but the commit adds config/run.xml -- confirm the file is
+                   generated under deploy/ or the path is stale. -->
+              <jvmarg value="-Dbenchmark.config=../deploy/run.xml"/>
+        </java>
+    </target>
+    
+    <target name="registry"
+        description="Starts the Faban registry">
+        <java classname="com.sun.faban.common.RegistryImpl"
+              classpathref="classpath"
+              fork="true"
+              failonerror="true">
+              <jvmarg value="-XX:+DisableExplicitGC"/>
+              <jvmarg value="-Djava.security.policy=security/driver.policy"/>
+              <jvmarg value="-Djava.util.logging.config.file=logging.properties"/>
+        </java>            
+    </target>
+    
+    <target name="agent"
+        description="Start a Faban agent">
+        <!-- To start the agent, properties driver.name, driver.id, 
+             and master.host need to be declared -->
+        <java classname="com.sun.faban.driver.core.AgentImpl"
+              classpathref="classpath"
+              fork="true"
+              failonerror="true">
+              <jvmarg value="-XX:+DisableExplicitGC"/>
+              <jvmarg value="-Djava.security.policy=security/driver.policy"/>
+              <jvmarg value="-Djava.util.logging.config.file=logging.properties"/>
+              <arg value="${driver.name}"/>
+              <arg value="${driver.id}"/>
+              <arg value="${master.host}"/>
+        </java>                        
+    </target>
+    
+    <target name="multi"
+        description="Start a distributed benchmark run">
+        <!-- registry first, then an agent after 2s, then the master run after 5s -->
+        <parallel>
+            <antcall target="registry"/>
+            <sequential>
+                <sleep seconds="2"/>
+                <antcall target="agent"/>                
+            </sequential>
+            <sequential>
+                <sleep seconds="5"/>
+                <antcall target="run"/>
+            </sequential>
+        </parallel>
+    </target>
+</project>

Added: incubator/olio/workload/rails/trunk/config/.run.properties.swp
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/config/.run.properties.swp?rev=706345&view=auto
==============================================================================
Binary file - no diff available.

Propchange: incubator/olio/workload/rails/trunk/config/.run.properties.swp
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: incubator/olio/workload/rails/trunk/config/.runconfig.xml.swp
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/config/.runconfig.xml.swp?rev=706345&view=auto
==============================================================================
Binary file - no diff available.

Propchange: incubator/olio/workload/rails/trunk/config/.runconfig.xml.swp
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: incubator/olio/workload/rails/trunk/config/logging.properties
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/config/logging.properties?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/config/logging.properties (added)
+++ incubator/olio/workload/rails/trunk/config/logging.properties Mon Oct 20 10:39:16 2008
@@ -0,0 +1,59 @@
+############################################################
+#  	Default Logging Configuration File
+#
+# You can use a different file by specifying a filename
+# with the java.util.logging.config.file system property.  
+# For example java -Djava.util.logging.config.file=myfile
+############################################################
+
+############################################################
+#  	Global properties
+############################################################
+
+# "handlers" specifies a comma separated list of log Handler 
+# classes.  These handlers will be installed during VM startup.
+# Note that these classes must be on the system classpath.
+# By default we only configure a ConsoleHandler, which will only
+# show messages at the INFO and above levels.
+handlers= java.util.logging.ConsoleHandler
+
+# To also add the FileHandler, use the following line instead.
+#handlers= java.util.logging.FileHandler, java.util.logging.ConsoleHandler
+
+# Default global logging level.
+# This specifies which kinds of events are logged across
+# all loggers.  For any given facility this global level
+can be overridden by a facility specific level
+# Note that the ConsoleHandler also has a separate level
+# setting to limit messages printed to the console.
+.level= INFO
+
+############################################################
+# Handler specific properties.
+# Describes specific configuration info for Handlers.
+############################################################
+
+# default file output is in user's home directory.
+java.util.logging.FileHandler.pattern = %h/java%u.log
+java.util.logging.FileHandler.limit = 50000
+java.util.logging.FileHandler.count = 1
+java.util.logging.FileHandler.formatter = java.util.logging.XMLFormatter
+
+# Limit the messages that are printed on the console to INFO and above.
+# java.util.logging.ConsoleHandler.level = INFO
+java.util.logging.ConsoleHandler.level = FINEST
+java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
+
+
+############################################################
+# Facility specific properties.
+# Provides extra control for each logger.
+############################################################
+
+# For example, set the com.xyz.foo logger to only log SEVERE
+# messages:
+com.xyz.foo.level = SEVERE
+com.sun.faban.driver.core.MatrixMix.level = FINER
+com.sun.faban.common.level = FINEST
+sample.level = FINEST
+faban.test.level = FINEST

Added: incubator/olio/workload/rails/trunk/config/run.xml
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/config/run.xml?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/config/run.xml (added)
+++ incubator/olio/workload/rails/trunk/config/run.xml Mon Oct 20 10:39:16 2008
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<webBenchmark>
+    <!-- The definition binds the config file with the benchmark class -->
+    <runConfig definition="com.sun.web20.driver.Web20Driver">
+        <!-- The hostConfig section is used by the harness to control hosts -->
+        <hostConfig>
+            <host>brazilian.sfbay</host>
+        </hostConfig>
+        <!-- The scale of the benchmark run, the driver definition
+        defines the number of threads for each driver scale
+        and each driver type. -->
+        <scale>20</scale>
+        <!-- The rampup, steadystate, and rampdown of the driver -->
+        <runControl>
+            <rampUp>60</rampUp>
+            <steadyState>300</steadyState>
+            <rampDown>30</rampDown>
+        </runControl>
+        <!-- The place where results are written -->
+        <outputDir>/tmp/output</outputDir>
+        <!-- Audit is a flag for benchmarks to audit results
+        at the end of the run. It is not currently used
+        and subject to change in the near future -->
+        <audit>false</audit>
+        <threadStart>
+            <!-- The avg time between the start of each thread in
+            an agent -->
+            <delay>1000</delay>
+            <!-- Whether the agents will wait until all threads are
+            up and then start the load all at once. If
+            set to false, each thread will start executing the
+            operations as soon as it is started. This will
+            gradually load up the server. -->
+            <simultaneous>false</simultaneous>
+            <!-- Whether the agents will start up threads in parallel
+            or not. If set to false, an agent will start all its
+            threads before the next agent gets to start the
+            threads. The socket connection queue will be less
+            loaded when it is set to false. -->
+            <parallel>false</parallel>
+        </threadStart>
+
+        <!-- Stats collection. maxRunTime is in hours and is used only for
+        benchmarks that are cycle controlled. In time-controlled
+        benchmarks, the actual maxRunTime is calculated from the rampUp,
+        stdyState, rampDown parameters. The interval is in seconds and
+        applies to  throughput and response time graphs.
+        The default is 30 seconds-->
+        <stats>
+            <maxRunTime>6</maxRunTime>
+            <interval>30</interval>
+        </stats>
+        <!-- Run-time stats exposed or not and the interval to update
+        the stats. A graphing package is required to display the
+        real time stats. It is not yet integrated into Faban. -->
+        <runtimeStats enabled="false">
+            <interval>5</interval>
+        </runtimeStats>
+
+        <!-- The driver-specific config.  -->
+        <driverConfig name="Web20Driver">
+            <!-- The number of agents -->
+            <agents>3</agents>
+            <!-- The stats interval, overrides the ones specified for
+            the benchmark. -->
+            <stats>
+                <interval>30</interval>
+            </stats>
+            <!-- Port to send the runtime stats. -->
+            <runtimeStats target="9988"/>
+            <properties>
+                <!-- Demonstrates the different ways to specify properties -->
+                <!-- The first way is a full name value pair, multiple
+                values are supported, but only one name  -->
+                <property>
+                    <name>path1</name>
+                    <value>webapp</value>
+                </property>
+                <!-- The second way is a little more compact while preserving
+                the ability to set multiple values for a name -->
+                <property name="path2">
+                    <value>webapp/submission.jsp</value>
+                </property>
+               
+            </properties>
+            <operationMix>
+                <name>MyOperation1</name>
+                <r>0</r><r>70</r>
+            </operationMix>
+            <operationMix>
+                <name>MyOperation2</name>
+                <r>60</r><r>0</r>
+            </operationMix>
+            
+        </driverConfig>
+    </runConfig>
+    <!-- This shows a way to set different configurations of resources outside
+    the driver. Such parameters are generally used by the
+    Faban Harness.-->
+    <serverConfig>
+        <host>129.145.130.85</host>
+        <port>8080</port>
+    </serverConfig>
+</webBenchmark>

Added: incubator/olio/workload/rails/trunk/config/security/driver.policy
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/config/security/driver.policy?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/config/security/driver.policy (added)
+++ incubator/olio/workload/rails/trunk/config/security/driver.policy Mon Oct 20 10:39:16 2008
@@ -0,0 +1,8 @@
+/* AUTOMATICALLY GENERATED ON Thu Dec 03 17:57:08 PST 1998*/
+/* DO NOT EDIT */
+
+grant {
+  permission java.security.AllPermission;
+  };
+
+

Added: incubator/olio/workload/rails/trunk/deploy/benchmark.xml
URL: http://svn.apache.org/viewvc/incubator/olio/workload/rails/trunk/deploy/benchmark.xml?rev=706345&view=auto
==============================================================================
--- incubator/olio/workload/rails/trunk/deploy/benchmark.xml (added)
+++ incubator/olio/workload/rails/trunk/deploy/benchmark.xml Mon Oct 20 10:39:16 2008
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<benchmark>
+    <!-- Note: Most of these fields are not needed for a benchmark
+    implemented using the Faban driver framework.
+    <name>Sample Web Workload1</name>
+    <version>0.1</version -->
+    <config-form>config.xhtml</config-form>
+    <config-file-name>run.xml</config-file-name>
+    <benchmark-class>com.sun.web20.harness.Web20Benchmark</benchmark-class>
+</benchmark>