Posted to commits@accumulo.apache.org by ec...@apache.org on 2012/12/19 17:25:07 UTC

svn commit: r1423923 [3/8] - in /accumulo/trunk: ./ bin/ core/ core/src/main/java/org/apache/accumulo/core/cli/ core/src/main/java/org/apache/accumulo/core/client/impl/ core/src/main/java/org/apache/accumulo/core/client/mapreduce/ core/src/main/java/or...

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java Wed Dec 19 16:25:03 2012
@@ -21,13 +21,14 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.List;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Mutation;
@@ -35,6 +36,8 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Takes a list of files and archives them into Accumulo keyed on the SHA1 hashes of the files. See docs/examples/README.filedata for instructions.
  */
@@ -166,31 +169,33 @@ public class FileDataIngest {
     return sb.toString();
   }
   
+  public static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--vis", description="use a given visibility for the ingested data", converter=VisibilityConverter.class)
+    ColumnVisibility visibility = new ColumnVisibility();
+    
+    @Parameter(names="--chunk", description="size of the chunks used to store partial files")
+    int chunkSize = 64*1024;
+    
+    @Parameter(description="<file> { <file> ... }")
+    List<String> files = new ArrayList<String>();
+  }
+  
+  
   public static void main(String[] args) throws Exception {
-    if (args.length < 8) {
-      System.out.println("usage: " + FileDataIngest.class.getSimpleName()
-          + " <instance> <zoo> <user> <pass> <data table> <visibility> <data chunk size> <file>{ <file>}");
-      System.exit(1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    String pass = args[3];
-    String dataTable = args[4];
-    ColumnVisibility colvis = new ColumnVisibility(args[5]);
-    int chunkSize = Integer.parseInt(args[6]);
-    
-    Connector conn = new ZooKeeperInstance(instance, zooKeepers).getConnector(user, pass.getBytes());
-    if (!conn.tableOperations().exists(dataTable)) {
-      conn.tableOperations().create(dataTable);
-      conn.tableOperations().attachIterator(dataTable, new IteratorSetting(1, ChunkCombiner.class));
-    }
-    BatchWriter bw = conn.createBatchWriter(dataTable, new BatchWriterConfig());
-    FileDataIngest fdi = new FileDataIngest(chunkSize, colvis);
-    for (int i = 7; i < args.length; i++) {
-      fdi.insertFileData(args[i], bw);
+    Opts opts = new Opts();
+    opts.parseArgs(FileDataIngest.class.getName(), args);
+    
+    Connector conn = opts.getConnector();
+    if (!conn.tableOperations().exists(opts.tableName)) {
+      conn.tableOperations().create(opts.tableName);
+      conn.tableOperations().attachIterator(opts.tableName, new IteratorSetting(1, ChunkCombiner.class));
+    }
+    BatchWriter bw = conn.createBatchWriter(opts.tableName, opts.getBatchWriterConfig());
+    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
+    for (String filename : opts.files) {
+      fdi.insertFileData(filename, bw);
     }
     bw.close();
+    opts.stopTracing();
   }
 }
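
For readers new to JCommander, the parsing pattern used throughout this commit reduces to the sketch below. The class and flag here are hypothetical; the real Opts classes extend ClientOnRequiredTable, whose parseArgs() also supplies the shared connection flags (instance, zookeepers, user, password, table) and --help handling.

    import java.util.ArrayList;
    import java.util.List;

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;

    // Hypothetical standalone demo of annotation-driven argument parsing.
    public class OptsDemo {
      @Parameter(names = "--chunk", description = "chunk size in bytes")
      int chunkSize = 64 * 1024; // the field default is kept when the flag is absent

      @Parameter(description = "<file> { <file> ... }")
      List<String> files = new ArrayList<String>(); // collects the unnamed arguments

      public static void main(String[] args) {
        OptsDemo opts = new OptsDemo();
        new JCommander(opts).parse(args); // populates the annotated fields
        System.out.println(opts.chunkSize + " " + opts.files);
      }
    }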

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java Wed Dec 19 16:25:03 2012
@@ -43,10 +43,10 @@ public class FileDataQuery {
   private ChunkInputStream cis;
   Scanner scanner;
   
-  public FileDataQuery(String instanceName, String zooKeepers, String user, String password, String tableName, Authorizations auths) throws AccumuloException,
+  public FileDataQuery(String instanceName, String zooKeepers, String user, byte[] password, String tableName, Authorizations auths) throws AccumuloException,
       AccumuloSecurityException, TableNotFoundException {
     ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    conn = instance.getConnector(user, password.getBytes());
+    conn = instance.getConnector(user, password);
     lastRefs = new ArrayList<Entry<Key,Value>>();
     cis = new ChunkInputStream();
     scanner = conn.createScanner(tableName, auths);
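
Call sites of FileDataQuery now pass the password bytes directly instead of a String. A minimal sketch of the new signature; the instance, zookeepers, credentials, and table name below are placeholders:

    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.examples.simple.filedata.FileDataQuery;

    public class FileDataQueryDemo {
      public static void main(String[] args) throws Exception {
        // All connection values here are placeholders.
        FileDataQuery fdq = new FileDataQuery("myinstance", "zoo1:2181", "root",
            "secret".getBytes(), "dataTable", new Authorizations());
      }
    }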

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java Wed Dec 19 16:25:03 2012
@@ -16,16 +16,15 @@
  */
 package org.apache.accumulo.examples.simple.helloworld;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MultiTableBatchWriter;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
@@ -34,29 +33,18 @@ import org.apache.hadoop.io.Text;
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
  */
 public class InsertWithBatchWriter {
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
       TableNotFoundException {
-    if (args.length != 5) {
-      System.out
-          .println("Usage: accumulo examples-simplejar accumulo.examples.helloworld.InsertWithBatchWriter <instance name> <zoo keepers> <username> <password> <tableName>");
-      System.exit(1);
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String tableName = args[4];
+    ClientOnRequiredTable opts = new ClientOnRequiredTable();
+    opts.parseArgs(InsertWithBatchWriter.class.getName(), args);
     
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
+    Connector connector = opts.getConnector();
+    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(opts.getBatchWriterConfig());
     
-    BatchWriter bw = null;
-    
-    if (!connector.tableOperations().exists(tableName))
-      connector.tableOperations().create(tableName);
-    bw = mtbw.getBatchWriter(tableName);
+    if (!connector.tableOperations().exists(opts.tableName))
+      connector.tableOperations().create(opts.tableName);
+    BatchWriter bw = mtbw.getBatchWriter(opts.tableName);
     
     Text colf = new Text("colfam");
     System.out.println("writing ...");
@@ -69,7 +57,6 @@ public class InsertWithBatchWriter {
       if (i % 100 == 0)
         System.out.println(i);
     }
-    
     mtbw.close();
   }
   

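For readers unfamiliar with MultiTableBatchWriter: it hands out per-table writers that share one buffer and one configuration, and closing it flushes them all. A sketch, assuming an existing Connector and two existing tables with placeholder names:

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MultiTableBatchWriter;

    public class MtbwDemo {
      static void writeTwoTables(Connector conn) throws Exception {
        MultiTableBatchWriter mtbw = conn.createMultiTableBatchWriter(new BatchWriterConfig());
        BatchWriter bw1 = mtbw.getBatchWriter("table1"); // placeholder table names
        BatchWriter bw2 = mtbw.getBatchWriter("table2");
        // ... add mutations to bw1 / bw2 ...
        mtbw.close(); // flushes and closes every writer it handed out
      }
    }
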
Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithOutputFormat.java Wed Dec 19 16:25:03 2012
@@ -16,6 +16,7 @@
  */
 package org.apache.accumulo.examples.simple.helloworld;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
@@ -33,20 +34,18 @@ import org.apache.hadoop.util.ToolRunner
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries using an OutputFormat.
  */
 public class InsertWithOutputFormat extends Configured implements Tool {
+
   // this is a tool because when you run a mapreduce, you will need to use the
   // ToolRunner
   // if you want libjars to be passed properly to the map and reduce tasks
   // even though this class isn't a mapreduce
   @Override
   public int run(String[] args) throws Exception {
-    if (args.length != 5) {
-      System.out.println("Usage: bin/tool.sh " + this.getClass().getName() + " <instance name> <zoo keepers> <username> <password> <tablename>");
-      return 1;
-    }
-    Text tableName = new Text(args[4]);
+    ClientOnRequiredTable opts = new ClientOnRequiredTable();
+    opts.parseArgs(this.getClass().getName(), args);
+    
     Job job = new Job(getConf());
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2], args[3].getBytes(), true, null);
+    opts.setAccumuloConfigs(job);
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     
     // when running a mapreduce, you won't need to instantiate the output
@@ -55,7 +54,7 @@ public class InsertWithOutputFormat exte
     // output.collect(tableName, mutation)
     TaskAttemptContext context = ContextFactory.createTaskAttemptContext(job);
     RecordWriter<Text,Mutation> rw = new AccumuloOutputFormat().getRecordWriter(context);
-    
+    Text table = new Text(opts.tableName);
     Text colf = new Text("colfam");
     System.out.println("writing ...");
     for (int i = 0; i < 10000; i++) {
@@ -63,7 +62,7 @@ public class InsertWithOutputFormat exte
       for (int j = 0; j < 5; j++) {
         m.put(colf, new Text(String.format("colqual_%d", j)), new Value((String.format("value_%d_%d", i, j)).getBytes()));
       }
-      rw.write(tableName, m); // repeat until done
+      rw.write(table, m); // repeat until done
       if (i % 100 == 0)
         System.out.println(i);
     }

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java Wed Dec 19 16:25:03 2012
@@ -19,45 +19,45 @@ package org.apache.accumulo.examples.sim
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Reads all data between two rows; all data after a given row; or all data in a table, depending on the number of arguments given.
  */
 public class ReadData {
+  
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--startKey")
+    String startKey;
+    @Parameter(names="--endKey")
+    String endKey;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length < 5 || args.length > 7) {
-      System.out
-          .println("bin/accumulo accumulo.examples.helloworld.ReadData <instance name> <zoo keepers> <username> <password> <tablename> [startkey [endkey]]");
-      System.exit(1);
-    }
-    
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String tableName = args[4];
+    Opts opts = new Opts();
+    opts.parseArgs(ReadData.class.getName(), args);
     
-    ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
+    Connector connector = opts.getConnector();
     
-    Scanner scan = connector.createScanner(tableName, Constants.NO_AUTHS);
+    Scanner scan = connector.createScanner(opts.tableName, opts.auths);
+    scan.setBatchSize(opts.scanBatchSize);
     Key start = null;
-    if (args.length > 5)
-      start = new Key(new Text(args[5]));
+    if (opts.startKey != null)
+      start = new Key(new Text(opts.startKey));
     Key end = null;
-    if (args.length > 6)
-      end = new Key(new Text(args[6]));
+    if (opts.endKey != null)
+      end = new Key(new Text(opts.endKey));
     scan.setRange(new Range(start, end));
     Iterator<Entry<Key,Value>> iter = scan.iterator();
     

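ReadData leans on Range treating a null endpoint as unbounded, which is why omitting --startKey and/or --endKey scans a suffix, a prefix, or the whole table. A sketch:

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.hadoop.io.Text;

    public class RangeDemo {
      public static void main(String[] args) {
        Range all = new Range((Key) null, (Key) null); // both ends open: whole table
        Range tail = new Range(new Key(new Text("row_100")), null); // row_100 to the end
        System.out.println(all + " / " + tail);
      }
    }
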
Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java Wed Dec 19 16:25:03 2012
@@ -19,14 +19,12 @@ package org.apache.accumulo.examples.sim
 import java.util.HashSet;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IsolatedScanner;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
@@ -34,6 +32,8 @@ import org.apache.accumulo.core.data.Val
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This example shows how a concurrent reader and writer can interfere with each other. It creates two threads that run forever reading and writing to the same
  * table.
@@ -48,15 +48,16 @@ public class InterferenceTest {
   
   private static final int NUM_ROWS = 500;
   private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num columns not a multiple of 10
-  private static long iterations;
   private static final Logger log = Logger.getLogger(InterferenceTest.class);
   
   static class Writer implements Runnable {
     
-    private BatchWriter bw;
+    private final BatchWriter bw;
+    private final long iterations;
     
-    Writer(BatchWriter bw) {
+    Writer(BatchWriter bw, long iterations) {
       this.bw = bw;
+      this.iterations = iterations; 
     }
     
     @Override
@@ -141,31 +142,32 @@ public class InterferenceTest {
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--iterations", description="number of times to run", required=true)
+    long iterations = 0;
+    @Parameter(names="--isolated", description="use isolated scans")
+    boolean isolated = false;
+  }
+  
+  
   public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(InterferenceTest.class.getName(), args);
     
-    if (args.length != 7) {
-      System.out.println("Usage : " + InterferenceTest.class.getName() + " <instance name> <zookeepers> <user> <password> <table> <iterations> true|false");
-      System.out.println("          The last argument determines if scans should be isolated.  When false, expect to see errors");
-      return;
-    }
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(args[0], args[1]);
-    Connector conn = zki.getConnector(args[2], args[3].getBytes());
+    if (opts.iterations < 1)
+      opts.iterations = Long.MAX_VALUE;
     
-    String table = args[4];
-    iterations = Long.parseLong(args[5]);
-    if (iterations < 1)
-      iterations = Long.MAX_VALUE;
-    if (!conn.tableOperations().exists(table))
-      conn.tableOperations().create(table);
+    Connector conn = opts.getConnector();
+    if (!conn.tableOperations().exists(opts.tableName))
+      conn.tableOperations().create(opts.tableName);
     
-    Thread writer = new Thread(new Writer(conn.createBatchWriter(table, new BatchWriterConfig())));
+    Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.tableName, opts.getBatchWriterConfig()), opts.iterations));
     writer.start();
     Reader r;
-    if (Boolean.parseBoolean(args[6]))
-      r = new Reader(new IsolatedScanner(conn.createScanner(table, Constants.NO_AUTHS)));
+    if (opts.isolated)
+      r = new Reader(new IsolatedScanner(conn.createScanner(opts.tableName, opts.auths)));
     else
-      r = new Reader(conn.createScanner(table, Constants.NO_AUTHS));
+      r = new Reader(conn.createScanner(opts.tableName, opts.auths));
     Thread reader;
     reader = new Thread(r);
     reader.start();
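
The --isolated flag above only decides which Scanner implementation the Reader gets; row-level isolation is obtained purely by wrapping a plain scanner. A sketch, assuming an existing Connector and table name:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IsolatedScanner;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.security.Authorizations;

    public class IsolationDemo {
      // Returns a scanner that never exposes a partially written row,
      // which is exactly the interference this example demonstrates.
      static Scanner isolated(Connector conn, String table) throws Exception {
        return new IsolatedScanner(conn.createScanner(table, new Authorizations()));
      }
    }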

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java Wed Dec 19 16:25:03 2012
@@ -18,12 +18,12 @@ package org.apache.accumulo.examples.sim
 
 import java.io.IOException;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -33,6 +33,8 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 public class RegexExample extends Configured implements Tool {
   public static class RegexMapper extends Mapper<Key,Value,Key,Value> {
     public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
@@ -40,16 +42,31 @@ public class RegexExample extends Config
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--rowRegex")
+    String rowRegex;
+    @Parameter(names="--columnFamilyRegex")
+    String columnFamilyRegex;
+    @Parameter(names="--columnQualifierRegex")
+    String columnQualifierRegex;
+    @Parameter(names="--valueRegex")
+    String valueRegex;
+    @Parameter(names="--output", required=true)
+    String destination;
+  }
+  
   public int run(String[] args) throws Exception {
-    Job job = new Job(getConf(), this.getClass().getSimpleName());
-    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(getClass().getName(), args);
+    
+    Job job = new Job(getConf(), getClass().getSimpleName());
+    job.setJarByClass(getClass());
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
     IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
-    RegExFilter.setRegexs(regex, args[5], args[6], args[7], args[8], false);
+    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex, opts.valueRegex, false);
     AccumuloInputFormat.addIterator(job.getConfiguration(), regex);
     
     job.setMapperClass(RegexMapper.class);
@@ -59,12 +76,12 @@ public class RegexExample extends Config
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[9]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.destination));
     
-    System.out.println("setRowRegex: " + args[5]);
-    System.out.println("setColumnFamilyRegex: " + args[6]);
-    System.out.println("setColumnQualifierRegex: " + args[7]);
-    System.out.println("setValueRegex: " + args[8]);
+    System.out.println("setRowRegex: " + opts.rowRegex);
+    System.out.println("setColumnFamilyRegex: " + opts.columnFamilyRegex);
+    System.out.println("setColumnQualifierRegex: " + opts.columnQualifierRegex);
+    System.out.println("setValueRegex: " + opts.valueRegex);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;
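
The RegExFilter setup above is not MapReduce-specific; the same IteratorSetting can be attached to an interactive scanner. A sketch, assuming an existing Scanner:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.RegExFilter;

    public class RegexScanDemo {
      static void addRowRegex(Scanner scanner, String rowRegex) {
        IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
        // Only the row term is set; null terms match everything.
        RegExFilter.setRegexs(regex, rowRegex, null, null, null, false);
        scanner.addScanIterator(regex); // the filter runs server side during the scan
      }
    }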

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java Wed Dec 19 16:25:03 2012
@@ -19,12 +19,12 @@ package org.apache.accumulo.examples.sim
 import java.io.IOException;
 import java.util.Collections;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.commons.codec.binary.Base64;
@@ -36,6 +36,8 @@ import org.apache.hadoop.mapreduce.Mappe
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 public class RowHash extends Configured implements Tool {
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
@@ -52,24 +54,27 @@ public class RowHash extends Configured 
     public void setup(Context job) {}
   }
   
+  private static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--column", required=true)
+    String column = null;
+  }
+  
   @Override
   public int run(String[] args) throws Exception {
     Job job = new Job(getConf(), this.getClass().getName());
     job.setJarByClass(this.getClass());
-    
+    Opts opts = new Opts();
+    opts.parseArgs(RowHash.class.getName(), args);
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
-    String col = args[5];
+    String col = opts.column;
     int idx = col.indexOf(":");
     Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
     Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
     if (cf.getLength() > 0)
       AccumuloInputFormat.fetchColumns(job.getConfiguration(), Collections.singleton(new Pair<Text,Text>(cf, cq)));
     
-    // AccumuloInputFormat.setLogLevel(job, Level.TRACE);
-    
     job.setMapperClass(HashDataMapper.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Mutation.class);
@@ -77,9 +82,6 @@ public class RowHash extends Configured 
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[2], args[3].getBytes(), true, args[6]);
-    // AccumuloOutputFormat.setLogLevel(job, Level.TRACE);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java Wed Dec 19 16:25:03 2012
@@ -20,10 +20,10 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Map;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.format.DefaultFormatter;
@@ -37,11 +37,21 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
- * Takes a table and outputs the specified column to a set of part files on hdfs accumulo accumulo.examples.mapreduce.TableToFile <username> <password>
- * <tablename> <column> <hdfs-output-path>
+ * Takes a table and outputs the specified column to a set of part files on
+ * HDFS. See the Opts class below for the arguments it accepts.
  */
 public class TableToFile extends Configured implements Tool {
+  
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--output", description="output directory", required=true)
+    String output;
+    @Parameter(names="--columns", description="columns to extract, in cf:cq{,cf:cq,...} form", required=true)
+    String columns;
+  }
+  
   /**
    * The Mapper class that given a row number, will generate the appropriate output line.
    */
@@ -74,13 +84,14 @@ public class TableToFile extends Configu
   public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
     Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
     job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(getClass().getName(), args);
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), args[4], new Authorizations());
+    opts.setAccumuloConfigs(job);
     
     HashSet<Pair<Text,Text>> columnsToFetch = new HashSet<Pair<Text,Text>>();
-    for (String col : args[5].split(",")) {
+    for (String col : opts.columns.split(",")) {
       int idx = col.indexOf(":");
       Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
       Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
@@ -97,7 +108,7 @@ public class TableToFile extends Configu
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[6]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.output));
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java Wed Dec 19 16:25:03 2012
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
@@ -46,6 +47,8 @@ import org.apache.hadoop.mapreduce.TaskA
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Generate the *almost* official terasort input data set. (See below) The user specifies the number of rows and the output directory and this class runs a
  * map/reduce program to generate the data. The format of the data is:
@@ -60,10 +63,7 @@ import org.apache.hadoop.util.ToolRunner
  * the same way TeraSort does use 10000000000 rows and 10/10 byte key length and 78/78 byte value length. Along with the 10 byte row id and \r\n this gives you
  * 100 byte row * 10000000000 rows = 1tb. Min/Max ranges for key and value parameters are inclusive/inclusive respectively.
  * 
- * Params <numrows> <minkeylength> <maxkeylength> <minvaluelength> <maxvaluelength> <tablename> <instance> <zoohosts> <username> <password> [numsplits]
- * numsplits allows you specify how many splits, and therefore mappers, to use
- * 
- * 
+ *  
  */
 public class TeraSortIngest extends Configured implements Tool {
   /**
@@ -343,10 +343,27 @@ public class TeraSortIngest extends Conf
     System.exit(res);
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--count", description="number of rows to ingest", required=true)
+    long numRows;
+    @Parameter(names={"-nk", "--minKeySize"}, description="minimum key size", required=true)
+    int minKeyLength;
+    @Parameter(names={"-xk", "--maxKeySize"}, description="maximum key size", required=true)
+    int maxKeyLength;
+    @Parameter(names={"-nv", "--minValueSize"}, description="minimum value size", required=true)
+    int minValueLength;
+    @Parameter(names={"-xv", "--maxValueSize"}, description="maximum value size", required=true)
+    int maxValueLength;
+    @Parameter(names="--splits", description="number of splits to create in the table")
+    int splits = 0;
+  }
+  
   @Override
   public int run(String[] args) throws Exception {
     Job job = new Job(getConf(), "TeraSortCloud");
     job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(TeraSortIngest.class.getName(), args);
     
     job.setInputFormatClass(RangeInputFormat.class);
     job.setMapperClass(SortGenMapper.class);
@@ -356,20 +373,19 @@ public class TeraSortIngest extends Conf
     job.setNumReduceTasks(0);
     
     job.setOutputFormatClass(AccumuloOutputFormat.class);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[6], args[7]);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), args[8], args[9].getBytes(), true, null);
+    opts.setAccumuloConfigs(job);
     AccumuloOutputFormat.setMaxMutationBufferSize(job.getConfiguration(), 10L * 1000 * 1000);
     
     Configuration conf = job.getConfiguration();
-    conf.setLong(NUMROWS, Long.parseLong(args[0]));
-    conf.setInt("cloudgen.minkeylength", Integer.parseInt(args[1]));
-    conf.setInt("cloudgen.maxkeylength", Integer.parseInt(args[2]));
-    conf.setInt("cloudgen.minvaluelength", Integer.parseInt(args[3]));
-    conf.setInt("cloudgen.maxvaluelength", Integer.parseInt(args[4]));
-    conf.set("cloudgen.tablename", args[5]);
+    conf.setLong(NUMROWS, opts.numRows);
+    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
+    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
+    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
+    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
+    conf.set("cloudgen.tablename", opts.tableName);
     
-    if (args.length > 10)
-      conf.setInt(NUMSPLITS, Integer.parseInt(args[10]));
+    if (opts.splits > 0)
+      conf.setInt(NUMSPLITS, opts.splits);
     
     job.waitForCompletion(true);
     return job.isSuccessful() ? 0 : 1;

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java Wed Dec 19 16:25:03 2012
@@ -4,13 +4,12 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -22,6 +21,8 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -70,34 +71,37 @@ public class UniqueColumns extends Confi
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--output", description="output directory", required=true)
+    String output;
+    @Parameter(names="--reducers", description="number of reducers to use", required=true)
+    int reducers;
+    @Parameter(names="--offline", description="run against an offline table")
+    boolean offline = false;
+  }
+  
   
   @Override
   public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(UniqueColumns.class.getName(), args);
     
-    if (args.length != 8) {
-      throw new IllegalArgumentException("Usage : " + UniqueColumns.class.getSimpleName()
-          + " <instance name> <zookeepers> <user> <password> <table> <output directory> <num reducers> offline|online");
-    }
-
-    boolean scanOffline = args[7].equals("offline");
-    String table = args[4];
     String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
     
     Job job = new Job(getConf(), jobName);
     job.setJarByClass(this.getClass());
 
-    String clone = table;
+    String clone = opts.tableName;
     Connector conn = null;
-    if (scanOffline) {
+    if (opts.offline) {
       /*
        * this example clones the table and takes it offline. If you plan to run map reduce jobs over a table many times, it may be more efficient to compact the
        * table, clone it, and then keep using the same clone as input for map reduce.
        */
       
-      ZooKeeperInstance zki = new ZooKeeperInstance(args[0], args[1]);
-      conn = zki.getConnector(args[2], args[3].getBytes());
-      clone = table + "_" + jobName;
-      conn.tableOperations().clone(table, clone, true, new HashMap<String,String>(), new HashSet<String>());
+      conn = opts.getConnector();
+      clone = opts.tableName + "_" + jobName;
+      conn.tableOperations().clone(opts.tableName, clone, true, new HashMap<String,String>(), new HashSet<String>());
       conn.tableOperations().offline(clone);
       
       AccumuloInputFormat.setScanOffline(job.getConfiguration(), true);
@@ -106,9 +110,7 @@ public class UniqueColumns extends Confi
 
     
     job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
-    AccumuloInputFormat.setInputInfo(job.getConfiguration(), args[2], args[3].getBytes(), clone, new Authorizations());
-    
+    opts.setAccumuloConfigs(job);
     
     job.setMapperClass(UMapper.class);
     job.setMapOutputKeyClass(Text.class);
@@ -117,14 +119,14 @@ public class UniqueColumns extends Confi
     job.setCombinerClass(UReducer.class);
     job.setReducerClass(UReducer.class);
 
-    job.setNumReduceTasks(Integer.parseInt(args[6]));
+    job.setNumReduceTasks(opts.reducers);
 
     job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(args[5]));
+    TextOutputFormat.setOutputPath(job, new Path(opts.output));
     
     job.waitForCompletion(true);
     
-    if (scanOffline) {
+    if (opts.offline) {
       conn.tableOperations().delete(clone);
     }
 

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java Wed Dec 19 16:25:03 2012
@@ -18,16 +18,11 @@ package org.apache.accumulo.examples.sim
 
 import java.io.IOException;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.commons.cli.BasicParser;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.Parser;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -38,24 +33,17 @@ import org.apache.hadoop.mapreduce.lib.i
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this.
  * 
  */
 public class WordCount extends Configured implements Tool {
-  private static Options opts;
-  private static Option passwordOpt;
-  private static Option usernameOpt;
-  private static String USAGE = "wordCount <instance name> <zoo keepers> <input dir> <output table>";
   
-  static {
-    usernameOpt = new Option("u", "username", true, "username");
-    passwordOpt = new Option("p", "password", true, "password");
-    
-    opts = new Options();
-    
-    opts.addOption(usernameOpt);
-    opts.addOption(passwordOpt);
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--input", description="input directory", required=true)
+    String inputDirectory;
   }
   
   public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
@@ -77,25 +65,15 @@ public class WordCount extends Configure
     }
   }
   
-  public int run(String[] unprocessed_args) throws Exception {
-    Parser p = new BasicParser();
-    
-    CommandLine cl = p.parse(opts, unprocessed_args);
-    String[] args = cl.getArgs();
-    
-    String username = cl.getOptionValue(usernameOpt.getOpt(), "root");
-    String password = cl.getOptionValue(passwordOpt.getOpt(), "secret");
-    
-    if (args.length != 4) {
-      System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 4.");
-      return printUsage();
-    }
+  public int run(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(WordCount.class.getName(), args);
     
     Job job = new Job(getConf(), WordCount.class.getName());
     job.setJarByClass(this.getClass());
     
     job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, new Path(args[2]));
+    TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));
     
     job.setMapperClass(MapClass.class);
     
@@ -104,18 +82,11 @@ public class WordCount extends Configure
     job.setOutputFormatClass(AccumuloOutputFormat.class);
     job.setOutputKeyClass(Text.class);
     job.setOutputValueClass(Mutation.class);
-    AccumuloOutputFormat.setOutputInfo(job.getConfiguration(), username, password.getBytes(), true, args[3]);
-    AccumuloOutputFormat.setZooKeeperInstance(job.getConfiguration(), args[0], args[1]);
+    opts.setAccumuloConfigs(job);
     job.waitForCompletion(true);
     return 0;
   }
   
-  private int printUsage() {
-    HelpFormatter hf = new HelpFormatter();
-    hf.printHelp(USAGE, opts);
-    return 0;
-  }
-  
   public static void main(String[] args) throws Exception {
     int res = ToolRunner.run(CachedConfiguration.getInstance(), new WordCount(), args);
     System.exit(res);

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java Wed Dec 19 16:25:03 2012
@@ -21,9 +21,8 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.util.Collection;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
 import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
 import org.apache.accumulo.core.data.Key;
@@ -44,6 +43,8 @@ import org.apache.hadoop.mapreduce.lib.i
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Example map reduce job that bulk ingest data into an accumulo table. The expected input is text files containing tab separated key value pairs on each line.
  */
@@ -92,11 +93,16 @@ public class BulkIngestExample extends C
     }
   }
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--inputDir", required=true)
+    String inputDir;
+    @Parameter(names="--workDir", required=true)
+    String workDir;
+  }
+  
   public int run(String[] args) {
-    if (args.length != 7) {
-      System.out.println("ERROR: Wrong number of parameters: " + args.length + " instead of 7.");
-      return printUsage();
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(BulkIngestExample.class.getName(), args);
     
     Configuration conf = getConf();
     PrintStream out = null;
@@ -112,23 +118,17 @@ public class BulkIngestExample extends C
       
       job.setReducerClass(ReduceClass.class);
       job.setOutputFormatClass(AccumuloFileOutputFormat.class);
+      opts.setAccumuloConfigs(job);
       
-      Instance instance = new ZooKeeperInstance(args[0], args[1]);
-      String user = args[2];
-      byte[] pass = args[3].getBytes();
-      String tableName = args[4];
-      String inputDir = args[5];
-      String workDir = args[6];
+      Connector connector = opts.getConnector();
       
-      Connector connector = instance.getConnector(user, pass);
-      
-      TextInputFormat.setInputPaths(job, new Path(inputDir));
-      AccumuloFileOutputFormat.setOutputPath(job, new Path(workDir + "/files"));
+      TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
+      AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));
       
       FileSystem fs = FileSystem.get(conf);
-      out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))));
+      out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));
       
-      Collection<Text> splits = connector.tableOperations().getSplits(tableName, 100);
+      Collection<Text> splits = connector.tableOperations().getSplits(opts.tableName, 100);
       for (Text split : splits)
         out.println(new String(Base64.encodeBase64(TextUtil.getBytes(split))));
       
@@ -136,13 +136,13 @@ public class BulkIngestExample extends C
       out.close();
       
       job.setPartitionerClass(RangePartitioner.class);
-      RangePartitioner.setSplitFile(job, workDir + "/splits.txt");
+      RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");
       
       job.waitForCompletion(true);
-      Path failures = new Path(workDir, "failures");
+      Path failures = new Path(opts.workDir, "failures");
       fs.delete(failures, true);
-      fs.mkdirs(new Path(workDir, "failures"));
-      connector.tableOperations().importDirectory(tableName, workDir + "/files", workDir + "/failures", false);
+      fs.mkdirs(new Path(opts.workDir, "failures"));
+      connector.tableOperations().importDirectory(opts.tableName, opts.workDir + "/files", opts.workDir + "/failures", false);
       
     } catch (Exception e) {
       throw new RuntimeException(e);
@@ -154,11 +154,6 @@ public class BulkIngestExample extends C
     return 0;
   }
   
-  private int printUsage() {
-    System.out.println("accumulo " + this.getClass().getName() + " <instanceName> <zooKeepers> <username> <password> <table> <input dir> <work dir> ");
-    return 0;
-  }
-  
   public static void main(String[] args) throws Exception {
     int res = ToolRunner.run(CachedConfiguration.getInstance(), new BulkIngestExample(), args);
     System.exit(res);
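
Once the MapReduce job has written RFiles under workDir + "/files", the ingest itself is the single importDirectory call from the diff, condensed below; the table name and paths are placeholders:

    import org.apache.accumulo.core.client.Connector;

    public class BulkImportDemo {
      static void bulkImport(Connector connector) throws Exception {
        // Files that cannot be imported are moved into the failures
        // directory, which the example empties and recreates beforehand.
        connector.tableOperations().importDirectory("mytable",
            "/tmp/bulkwork/files", "/tmp/bulkwork/failures", false);
      }
    }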

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java Wed Dec 19 16:25:03 2012
@@ -21,26 +21,32 @@ import java.io.IOException;
 import java.io.PrintStream;
 
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
+import com.beust.jcommander.Parameter;
+
 public class GenerateTestData {
   
+  static class Opts extends org.apache.accumulo.core.cli.Help {
+    @Parameter(names="--start-row", required=true)
+    int startRow = 0;
+    @Parameter(names="--count", required=true)
+    int numRows = 0;
+    @Parameter(names="--output", required=true)
+    String outputFile;
+  }
+  
   public static void main(String[] args) throws IOException {
-    int startRow = Integer.parseInt(args[0]);
-    int numRows = Integer.parseInt(args[1]);
-    String outputFile = args[2];
+    Opts opts = new Opts();
+    opts.parseArgs(GenerateTestData.class.getName(), args);
     
-    Configuration conf = CachedConfiguration.getInstance();
-    FileSystem fs = FileSystem.get(conf);
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.outputFile))));
     
-    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(outputFile))));
-    
-    for (int i = 0; i < numRows; i++) {
-      out.println(String.format("row_%08d\tvalue_%08d", i + startRow, i + startRow));
+    for (int i = 0; i < opts.numRows; i++) {
+      out.println(String.format("row_%08d\tvalue_%08d", i + opts.startRow, i + opts.startRow));
     }
-    
     out.close();
   }
   

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java Wed Dec 19 16:25:03 2012
@@ -16,38 +16,35 @@
  */
 package org.apache.accumulo.examples.simple.mapreduce.bulk;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 public class SetupTable {
   
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException {
-    Connector conn = new ZooKeeperInstance(args[0], args[1]).getConnector(args[2], args[3].getBytes());
-    if (args.length == 5) {
-      // create a basic table
-      conn.tableOperations().create(args[4]);
-    } else if (args.length > 5) {
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(description="<split> { <split> ... } ")
+    List<String> splits = new ArrayList<String>();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(SetupTable.class.getName(), args);
+    Connector conn = opts.getConnector();
+    conn.tableOperations().create(opts.tableName);
+    if (!opts.splits.isEmpty()) {
       // create a table with initial partitions
-      TreeSet<Text> intialPartitions = new TreeSet<Text>();
+      TreeSet<Text> initialPartitions = new TreeSet<Text>();
-      for (int i = 5; i < args.length; ++i)
-        intialPartitions.add(new Text(args[i]));
-      conn.tableOperations().create(args[4]);
-      
-      try {
-        conn.tableOperations().addSplits(args[4], intialPartitions);
-      } catch (TableNotFoundException e) {
-        // unlikely
-        throw new RuntimeException(e);
+      for (String split : opts.splits) {
+        initialPartitions.add(new Text(split));
       }
-    } else {
-      System.err.println("Usage : SetupTable <instance> <zookeepers> <username> <password> <table name> [<split point>{ <split point}]");
-    }
+      conn.tableOperations().addSplits(opts.tableName, initialPartitions);
+    } 
   }
 }

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java Wed Dec 19 16:25:03 2012
@@ -19,49 +19,44 @@ package org.apache.accumulo.examples.sim
 import java.util.Iterator;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.beust.jcommander.Parameter;
+
 public class VerifyIngest {
   private static final Logger log = Logger.getLogger(VerifyIngest.class);
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--start-row")
+    int startRow = 0;
+    @Parameter(names="--count", required=true, description="number of rows to verify")
+    int numRows = 0;
+  }
+  
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    if (args.length != 7) {
-      System.err.println("VerifyIngest <instance name> <zoo keepers> <username> <password> <table> <startRow> <numRows> ");
-      return;
-    }
+    Opts opts = new Opts();
+    opts.parseArgs(VerifyIngest.class.getName(), args);
     
-    String instanceName = args[0];
-    String zooKeepers = args[1];
-    String user = args[2];
-    byte[] pass = args[3].getBytes();
-    String table = args[4];
-    
-    int startRow = Integer.parseInt(args[5]);
-    int numRows = Integer.parseInt(args[6]);
-    
-    Instance instance = new ZooKeeperInstance(instanceName, zooKeepers);
-    Connector connector = instance.getConnector(user, pass);
-    Scanner scanner = connector.createScanner(table, Constants.NO_AUTHS);
+    Connector connector = opts.getConnector();
+    Scanner scanner = connector.createScanner(opts.tableName, opts.auths);
     
-    scanner.setRange(new Range(new Text(String.format("row_%08d", startRow)), null));
+    scanner.setRange(new Range(new Text(String.format("row_%08d", opts.startRow)), null));
     
     Iterator<Entry<Key,Value>> si = scanner.iterator();
     
     boolean ok = true;
     
-    for (int i = startRow; i < numRows; i++) {
+    for (int i = opts.startRow; i < opts.numRows; i++) {
       
       if (si.hasNext()) {
         Entry<Key,Value> entry = si.next();
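
Because --count is declared required=true, JCommander rejects an invocation that omits it at parse time. A standalone sketch of that behavior (parseArgs in the base class presumably wraps something similar; the demo class here is illustrative):

    import com.beust.jcommander.JCommander;
    import com.beust.jcommander.Parameter;
    import com.beust.jcommander.ParameterException;

    public class RequiredDemo {
      static class Opts {
        @Parameter(names="--count", required=true, description="number of rows to verify")
        int numRows = 0;
      }

      public static void main(String[] args) {
        Opts opts = new Opts();
        JCommander jc = new JCommander(opts);
        try {
          jc.parse(args);                  // throws if --count is missing
        } catch (ParameterException e) {
          System.err.println(e.getMessage());
          jc.usage();                      // prints each flag with its description
          System.exit(1);
        }
      }
    }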

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java Wed Dec 19 16:25:03 2012
@@ -22,18 +22,19 @@ import java.util.Collections;
 import java.util.Map.Entry;
 import java.util.Random;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOpts;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
 * Using the doc2Term table created by {@link Reverse}, this program randomly selects N words per document. Then it continually queries a random set of words in
  * the shard table (created by {@link Index}) using the {@link IntersectingIterator}.
@@ -42,35 +43,31 @@ import org.apache.hadoop.io.Text;
  */
 
 public class ContinuousQuery {
-  public static void main(String[] args) throws Exception {
-    
-    if (args.length != 7 && args.length != 8) {
-      System.err.println("Usage : " + ContinuousQuery.class.getName()
-          + " <instance> <zoo keepers> <shard table> <doc2word table> <user> <pass> <num query terms> [iterations]");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String docTable = args[3];
-    String user = args[4];
-    String pass = args[5];
-    int numTerms = Integer.parseInt(args[6]);
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names="--shardTable", required=true, description="name of the shard table")
+    String table = null;
+    @Parameter(names="--doc2Term", required=true, description="name of the doc2Term table")
+    String doc2Term;
+    @Parameter(names="--terms", required=true, description="the number of terms in the query")
+    int numTerms;
+    @Parameter(names="--count", description="the number of queries to run")
     long iterations = Long.MAX_VALUE;
-    if (args.length > 7)
-      iterations = Long.parseLong(args[7]);
+  }
+  
+  public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(ContinuousQuery.class.getName(), args);
     
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
+    Connector conn = opts.getConnector();
     
-    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(docTable, Constants.NO_AUTHS), numTerms);
+    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(opts.doc2Term, opts.auths), opts.numTerms);
     
     Random rand = new Random();
     
-    BatchScanner bs = conn.createBatchScanner(table, Constants.NO_AUTHS, 20);
+    BatchScanner bs = conn.createBatchScanner(opts.table, opts.auths, opts.scanThreads);
     
-    for (long i = 0; i < iterations; i += 1) {
+    for (long i = 0; i < opts.iterations; i += 1) {
       Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
       
       bs.clearScanIterators();
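
An invocation of the reworked ContinuousQuery now reads roughly as follows; the connection flags come from the ClientOpts base class, which is outside this hunk, so their exact names here (-u, -p, -i, -z) are an assumption:

    bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery \
        -u root -p secret -i instance -z zoo1:2181 \
        --shardTable shard --doc2Term doc2Term --terms 5 --count 1000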

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java Wed Dec 19 16:25:03 2012
@@ -18,16 +18,18 @@ package org.apache.accumulo.examples.sim
 
 import java.io.File;
 import java.io.FileReader;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This program indexes a set of documents given on the command line into a shard table.
  * 
@@ -89,37 +91,24 @@ public class Index {
     
   }
   
-  private static BatchWriter setupBatchWriter(String instance, String zooKeepers, String table, String user, String pass) throws Exception {
-    ZooKeeperInstance zinstance = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zinstance.getConnector(user, pass.getBytes());
-    return conn.createBatchWriter(table, new BatchWriterConfig());
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(names="--partitions", required=true, description="the number of shards to create")
+    int partitions;
+    @Parameter(required=true, description="<file> { <file> ... }")
+    List<String> files = new ArrayList<String>();
   }
   
   public static void main(String[] args) throws Exception {
-    
-    if (args.length < 7) {
-      System.err.println("Usage : " + Index.class.getName() + " <instance> <zoo keepers> <table> <user> <pass> <num partitions> <file>{ <file>}");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String user = args[3];
-    String pass = args[4];
-    
-    int numPartitions = Integer.parseInt(args[5]);
+    Opts opts = new Opts();
+    opts.parseArgs(Index.class.getName(), args);
     
     String splitRegex = "\\W+";
     
-    BatchWriter bw = setupBatchWriter(instance, zooKeepers, table, user, pass);
-    
-    for (int i = 6; i < args.length; i++) {
-      index(numPartitions, new File(args[i]), splitRegex, bw);
+    BatchWriter bw = opts.getConnector().createBatchWriter(opts.tableName, opts.getBatchWriterConfig());    
+    for (String filename : opts.files) {
+      index(opts.partitions, new File(filename), splitRegex, bw);
     }
-    
     bw.close();
-    
   }
   
 }
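
The setupBatchWriter() helper disappears because ClientOpts can now hand back a BatchWriterConfig directly. What such a config carries looks roughly like the sketch below; the specific limits are illustrative assumptions, not the values ClientOpts actually uses:

    // Illustrative only: the real values come from ClientOpts' command-line flags.
    BatchWriterConfig cfg = new BatchWriterConfig()
        .setMaxMemory(50 * 1024 * 1024)   // buffer up to 50MB of mutations client-side
        .setMaxWriteThreads(3);           // flush to tablet servers with three threads
    BatchWriter bw = conn.createBatchWriter(opts.tableName, cfg);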

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java Wed Dec 19 16:25:03 2012
@@ -16,20 +16,23 @@
  */
 package org.apache.accumulo.examples.simple.shard;
 
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
  * 
@@ -38,30 +41,25 @@ import org.apache.hadoop.io.Text;
 
 public class Query {
   
+  static class Opts extends ClientOnRequiredTable {
+    @Parameter(description=" term { <term> ... }")
+    List<String> terms = new ArrayList<String>();
+  }
+  
   /**
    * @param args
    */
   public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(Query.class.getName(), args);
     
-    if (args.length < 6) {
-      System.err.println("Usage : " + Query.class.getName() + " <instance> <zoo keepers> <table> <user> <pass> <term>{ <term>}");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String table = args[2];
-    String user = args[3];
-    String pass = args[4];
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
-    
-    BatchScanner bs = conn.createBatchScanner(table, Constants.NO_AUTHS, 20);
+    Connector conn = opts.getConnector();
+    BatchScanner bs = conn.createBatchScanner(opts.tableName, opts.auths, opts.batchThreads);
     
-    Text columns[] = new Text[args.length - 5];
-    for (int i = 5; i < args.length; i++) {
-      columns[i - 5] = new Text(args[i]);
+    Text columns[] = new Text[opts.terms.size()];
+    int i = 0;
+    for (String term : opts.terms) {
+      columns[i++] = new Text(term);
     }
     IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
     IntersectingIterator.setColumnFamilies(ii, columns);
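
The hunk ends before the scan is executed; the remainder of main() is unchanged by this commit and presumably continues along these lines (the empty Range and the printed field are assumptions about the surrounding code):

    bs.addScanIterator(ii);                            // attach the IntersectingIterator
    bs.setRanges(Collections.singleton(new Range()));  // scan every shard partition
    for (Entry<Key,Value> entry : bs) {
      System.out.println("  " + entry.getKey().getColumnQualifier());
    }
    bs.close();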

Modified: accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java (original)
+++ accumulo/trunk/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java Wed Dec 19 16:25:03 2012
@@ -18,17 +18,17 @@ package org.apache.accumulo.examples.sim
 
 import java.util.Map.Entry;
 
-import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.ClientOpts;
 import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.hadoop.io.Text;
 
+import com.beust.jcommander.Parameter;
+
 /**
 * This program reads an Accumulo table written by {@link Index} and writes a mapping of documents to terms into another table. The document-to-term
 * mapping is used by {@link ContinuousQuery}.
@@ -37,25 +37,23 @@ import org.apache.hadoop.io.Text;
  */
 
 public class Reverse {
+  
+  static class Opts extends ClientOpts {
+    @Parameter(names="--shardTable")
+    String shardTable = "shard";
+    @Parameter(names="--doc2Term")
+    String doc2TermTable = "doc2Term";
+  }
+  
   public static void main(String[] args) throws Exception {
+    Opts opts = new Opts();
+    opts.parseArgs(Reverse.class.getName(), args);
     
-    if (args.length != 6) {
-      System.err.println("Usage : " + Reverse.class.getName() + " <instance> <zoo keepers> <shard table> <doc2word table> <user> <pass>");
-      System.exit(-1);
-    }
-    
-    String instance = args[0];
-    String zooKeepers = args[1];
-    String inTable = args[2];
-    String outTable = args[3];
-    String user = args[4];
-    String pass = args[5];
-    
-    ZooKeeperInstance zki = new ZooKeeperInstance(instance, zooKeepers);
-    Connector conn = zki.getConnector(user, pass.getBytes());
+    Connector conn = opts.getConnector();
     
-    Scanner scanner = conn.createScanner(inTable, Constants.NO_AUTHS);
-    BatchWriter bw = conn.createBatchWriter(outTable, new BatchWriterConfig());
+    Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
+    scanner.setBatchSize(opts.scanBatchSize);
+    BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, opts.getBatchWriterConfig());
     
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
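
The loop body is truncated by the hunk. Assuming the shard table is keyed row=partition, colf=term, colq=docId (consistent with the Index example, but an assumption here), the reverse mapping would be written along these lines:

    // Assumed key layout: swap term and document so doc2Term is keyed by document.
    Text term = key.getColumnFamily();
    Text docId = key.getColumnQualifier();
    Mutation m = new Mutation(docId);
    m.put(term, new Text(""), new Value(new byte[0]));
    bw.addMutation(m);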

Modified: accumulo/trunk/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java (original)
+++ accumulo/trunk/examples/simple/src/test/java/org/apache/accumulo/examples/simple/dirlist/CountTest.java Wed Dec 19 16:25:03 2012
@@ -31,6 +31,7 @@ import org.apache.accumulo.core.data.Val
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.examples.simple.dirlist.FileCount.Opts;
 import org.apache.hadoop.io.Text;
 
 public class CountTest extends TestCase {
@@ -61,7 +62,11 @@ public class CountTest extends TestCase 
     scanner.fetchColumn(new Text("dir"), new Text("counts"));
     assertFalse(scanner.iterator().hasNext());
     
-    FileCount fc = new FileCount("counttest", null, "root", "", "dirlisttable", "", "", true);
+    Opts opts = new Opts();
+    opts.instance = "counttest";
+    opts.tableName = "dirlisttable";
+    opts.mock = true;
+    FileCount fc = new FileCount(opts);
     fc.run();
     
     ArrayList<Pair<String,String>> expected = new ArrayList<Pair<String,String>>();

Modified: accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java (original)
+++ accumulo/trunk/fate/src/main/java/org/apache/accumulo/fate/util/LoggingRunnable.java Wed Dec 19 16:25:03 2012
@@ -46,20 +46,5 @@ public class LoggingRunnable implements 
       }
     }
   }
-  
-  public static void main(String[] args) {
-    Runnable r = new Runnable() {
-      @Override
-      public void run() {
-        int x[] = new int[0];
-        
-        x[0]++;
-      }
-    };
-    
-    LoggingRunnable lr = new LoggingRunnable(null, r);
-    lr.run();
-    
-  }
-  
+
 }
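
The main() removed here was an ad-hoc smoke test that deliberately triggered an ArrayIndexOutOfBoundsException. Typical use of the class, sketched with an illustrative Worker runnable: any Throwable escaping the delegate is logged rather than silently killing the thread.

    Logger log = Logger.getLogger(Worker.class);
    Thread t = new Thread(new LoggingRunnable(log, new Worker()));
    t.start();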

Modified: accumulo/trunk/pom.xml
URL: http://svn.apache.org/viewvc/accumulo/trunk/pom.xml?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/pom.xml (original)
+++ accumulo/trunk/pom.xml Wed Dec 19 16:25:03 2012
@@ -144,7 +144,7 @@
             <configuration>
               <outputDirectory>../lib</outputDirectory>
               <!-- just grab the non-provided runtime dependencies -->
-              <includeArtifactIds>commons-collections,commons-configuration,commons-io,commons-lang,jline,log4j,libthrift,commons-jci-core,commons-jci-fam,commons-logging,commons-logging-api,commons-vfs2,gson</includeArtifactIds>
+              <includeArtifactIds>commons-collections,commons-configuration,commons-io,commons-lang,jline,log4j,libthrift,commons-jci-core,commons-jci-fam,commons-logging,commons-logging-api,commons-vfs2,gson,jcommander</includeArtifactIds>
               <excludeTransitive>true</excludeTransitive>
             </configuration>
           </execution>
@@ -713,6 +713,11 @@
         <version>1.0</version>
       </dependency>
       <dependency>
+        <groupId>com.beust</groupId>
+        <artifactId>jcommander</artifactId>
+        <version>1.30</version>
+      </dependency>
+      <dependency>
         <groupId>com.google.code.gson</groupId>
         <artifactId>gson</artifactId>
         <version>2.2.2</version>
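
Since jcommander is pinned under dependencyManagement here, the modules that use it (core, examples, server) can presumably declare the dependency without a version:

    <dependency>
      <groupId>com.beust</groupId>
      <artifactId>jcommander</artifactId>
    </dependency>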

Added: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java?rev=1423923&view=auto
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java (added)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java Wed Dec 19 16:25:03 2012
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.cli;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+
+public class ClientOnDefaultTable extends org.apache.accumulo.core.cli.ClientOnDefaultTable {
+  {
+    user = "root";
+  }
+
+  @Override
+  synchronized public Instance getInstance() {
+    if (cachedInstance != null)
+      return cachedInstance;
+    
+    if (mock)
+      return cachedInstance = new MockInstance(instance);
+    if (instance == null) {
+      return cachedInstance = HdfsZooInstance.getInstance();
+    }
+    return cachedInstance = new ZooKeeperInstance(this.instance, this.zookeepers);
+  }
+  public ClientOnDefaultTable(String table) {
+    super(table);
+  }
+}

Propchange: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java?rev=1423923&view=auto
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java (added)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java Wed Dec 19 16:25:03 2012
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.cli;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+
+public class ClientOnRequiredTable extends org.apache.accumulo.core.cli.ClientOnRequiredTable {
+  {
+    user = "root";
+  }
+
+  @Override
+  synchronized public Instance getInstance() {
+    if (cachedInstance != null)
+      return cachedInstance;
+    
+    if (mock)
+      return cachedInstance = new MockInstance(instance);
+    if (instance == null) {
+      return cachedInstance = HdfsZooInstance.getInstance();
+    }
+    return cachedInstance = new ZooKeeperInstance(this.instance, this.zookeepers);
+  }
+}

Propchange: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
------------------------------------------------------------------------------
    svn:eol-style = native

Added: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java?rev=1423923&view=auto
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java (added)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java Wed Dec 19 16:25:03 2012
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.cli;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+
+public class ClientOpts extends org.apache.accumulo.core.cli.ClientOpts {
+  
+  {
+    user = "root";
+  }
+
+  @Override
+  public Instance getInstance() {
+    if (mock)
+      return new MockInstance(instance);
+    if (instance == null) {
+      return HdfsZooInstance.getInstance();
+    }
+    return new ZooKeeperInstance(this.instance, this.zookeepers);
+  }
+}

Propchange: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
------------------------------------------------------------------------------
    svn:eol-style = native
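
All three server-side option classes resolve the instance the same way: the mock option wins, then an explicitly named instance, and finally HdfsZooInstance as the server-local default. The practical effect for a server-side tool, sketched (the tool name is illustrative):

    ClientOpts opts = new ClientOpts();
    opts.parseArgs("SomeServerTool", args);  // parseArgs is inherited from the core ClientOpts
    Connector conn = opts.getConnector();    // with no instance flags, falls back to HdfsZooInstance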

Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/fate/Admin.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/fate/Admin.java?rev=1423923&r1=1423922&r2=1423923&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/fate/Admin.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/fate/Admin.java Wed Dec 19 16:25:03 2012
@@ -16,41 +16,74 @@
  */
 package org.apache.accumulo.server.fate;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AdminUtil;
 import org.apache.accumulo.fate.ZooStore;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.Parameters;
+
 /**
  * A utility to administer FATE operations
  */
 public class Admin {
+  
+  static class TxOpts {
+    @Parameter(description="<txid>", required=true)
+    List<String> args = new ArrayList<String>();
+  }
+  
+  @Parameters(commandDescription="Stop an existing FATE by transaction id")
+  static class FailOpts extends TxOpts {
+  }
+  
+  @Parameters(commandDescription="Delete an existing FATE by transaction id")
+  static class DeleteOpts extends TxOpts {
+  }
+  
+  @Parameters(commandDescription="List the existing FATE transactions")
+  static class ListOpts {
+  }
+  
   public static void main(String[] args) throws Exception {
-    AdminUtil<Master> admin = new AdminUtil<Master>();
-    boolean valid = (args.length == 2 && args[0].matches("fail|delete")) || (args.length == 1 && args[0].equals("print"));
-    
-    if (!valid) {
-      System.err.println("Usage : " + Admin.class.getSimpleName() + " fail <txid> | delete <txid> | print");
+    ClientOpts opts = new ClientOpts();
+    JCommander jc = new JCommander(opts);
+    jc.setProgramName(Admin.class.getName());
+    FailOpts fail = new FailOpts();
+    jc.addCommand("fail", fail);
+    DeleteOpts deleteOpts = new DeleteOpts();
+    jc.addCommand("delete", deleteOpts);
+    jc.addCommand("list", new ListOpts());
+    jc.parse(args);
+    if (opts.help || jc.getParsedCommand() == null) {
+      jc.usage();
       System.exit(-1);
     }
     
-    Instance instance = HdfsZooInstance.getInstance();
+    AdminUtil<Master> admin = new AdminUtil<Master>();
+    
+    Instance instance = opts.getInstance();
     String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
     String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
     IZooReaderWriter zk = ZooReaderWriter.getRetryingInstance();
     ZooStore<Master> zs = new ZooStore<Master>(path, zk);
     
-    if (args[0].equals("fail")) {
+    if (jc.getParsedCommand().equals("fail")) {
       admin.prepFail(zs, masterPath, args[1]);
-    } else if (args[0].equals("delete")) {
+    } else if (jc.getParsedCommand().equals("delete")) {
       admin.prepDelete(zs, masterPath, args[1]);
       admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, args[1]);
-    } else if (args[0].equals("print")) {
+    } else if (jc.getParsedCommand().equals("print")) {
       admin.print(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS);
     }
   }
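
With the subcommands registered above, invocations of the FATE admin tool take the form (launcher script assumed):

    bin/accumulo org.apache.accumulo.server.fate.Admin fail <txid>
    bin/accumulo org.apache.accumulo.server.fate.Admin delete <txid>
    bin/accumulo org.apache.accumulo.server.fate.Admin list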