Posted to commits@accumulo.apache.org by ct...@apache.org on 2014/04/09 19:57:38 UTC

[07/64] [abbrv] Merge branch '1.4.6-SNAPSHOT' into 1.5.2-SNAPSHOT

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
index f4408f5,0000000..ae5e395
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormatTest.java
@@@ -1,462 -1,0 +1,456 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.client.mapreduce;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +import static org.junit.Assert.assertTrue;
 +
 +import java.io.ByteArrayOutputStream;
 +import java.io.DataOutputStream;
 +import java.io.IOException;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.Instance;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.mock.MockInstance;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.RegExFilter;
 +import org.apache.accumulo.core.iterators.user.WholeRowIterator;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.commons.codec.binary.Base64;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.InputFormat;
 +import org.apache.hadoop.mapreduce.InputSplit;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.Mapper;
 +import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.log4j.Level;
 +import org.junit.Assert;
 +import org.junit.Test;
 +
 +public class AccumuloInputFormatTest {
 +
 +  private static final String PREFIX = AccumuloInputFormatTest.class.getSimpleName();
 +
 +  /**
 +   * Test basic setting & getting of max versions.
 +   * 
 +   * @throws IOException
 +   *           Signals that an I/O exception has occurred.
 +   */
 +  @Deprecated
 +  @Test
 +  public void testMaxVersions() throws IOException {
 +    Job job = new Job();
 +    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 1);
 +    int version = AccumuloInputFormat.getMaxVersions(job.getConfiguration());
 +    assertEquals(1, version);
 +  }
 +
 +  /**
 +   * Test max versions with an invalid value.
 +   * 
 +   * @throws IOException
 +   *           Signals that an I/O exception has occurred.
 +   */
 +  @Deprecated
 +  @Test(expected = IOException.class)
 +  public void testMaxVersionsLessThan1() throws IOException {
 +    Job job = new Job();
 +    AccumuloInputFormat.setMaxVersions(job.getConfiguration(), 0);
 +  }
 +
 +  /**
 +   * Test no max version configured.
-    * 
-    * @throws IOException
 +   */
 +  @Deprecated
 +  @Test
 +  public void testNoMaxVersion() throws IOException {
 +    Job job = new Job();
 +    assertEquals(-1, AccumuloInputFormat.getMaxVersions(job.getConfiguration()));
 +  }
 +
 +  /**
 +   * Check that the iterator configuration is getting stored in the Job conf correctly.
-    * 
-    * @throws IOException
 +   */
 +  @Test
 +  public void testSetIterator() throws IOException {
 +    Job job = new Job();
 +
 +    IteratorSetting is = new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator");
 +    AccumuloInputFormat.addIterator(job, is);
 +    Configuration conf = job.getConfiguration();
 +    ByteArrayOutputStream baos = new ByteArrayOutputStream();
 +    is.write(new DataOutputStream(baos));
 +    String iterators = conf.get("AccumuloInputFormat.ScanOpts.Iterators");
 +    assertEquals(new String(Base64.encodeBase64(baos.toByteArray())), iterators);
 +  }
 +
 +  @Test
 +  public void testAddIterator() throws IOException {
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", WholeRowIterator.class));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    IteratorSetting iter = new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator");
 +    iter.addOption("v1", "1");
 +    iter.addOption("junk", "\0omg:!\\xyzzy");
 +    AccumuloInputFormat.addIterator(job, iter);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertEquals(3, list.size());
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.user.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +    assertEquals(0, setting.getOptions().size());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +    assertEquals(2, setting.getOptions().size());
 +    assertEquals("1", setting.getOptions().get("v1"));
 +    assertEquals("\0omg:!\\xyzzy", setting.getOptions().get("junk"));
 +  }
 +
 +  /**
 +   * Test adding iterator options where the keys and values contain both the FIELD_SEPARATOR character (':') and ITERATOR_SEPARATOR (',') characters. There
 +   * should be no exceptions thrown when trying to parse these types of option entries.
 +   * 
 +   * This test makes sure that the raw values, as they appear in the Job, are what's expected.
 +   */
 +  @Test
 +  public void testIteratorOptionEncoding() throws Throwable {
 +    String key = "colon:delimited:key";
 +    String value = "comma,delimited,value";
 +    IteratorSetting someSetting = new IteratorSetting(1, "iterator", "Iterator.class");
 +    someSetting.addOption(key, value);
 +    Job job = new Job();
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(1, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(value, list.get(0).getOptions().get(key));
 +
 +    someSetting.addOption(key + "2", value);
 +    someSetting.setPriority(2);
 +    someSetting.setName("it2");
 +    AccumuloInputFormat.addIterator(job, someSetting);
 +    list = AccumuloInputFormat.getIterators(job);
 +    assertEquals(2, list.size());
 +    assertEquals(1, list.get(0).getOptions().size());
 +    assertEquals(value, list.get(0).getOptions().get(key));
 +    assertEquals(2, list.get(1).getOptions().size());
 +    assertEquals(value, list.get(1).getOptions().get(key));
 +    assertEquals(value, list.get(1).getOptions().get(key + "2"));
 +  }
 +
 +  /**
 +   * Test getting iterator settings when multiple iterators are set
-    * 
-    * @throws IOException
 +   */
 +  @Test
 +  public void testGetIteratorSettings() throws IOException {
 +    Job job = new Job();
 +
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(1, "WholeRow", "org.apache.accumulo.core.iterators.WholeRowIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(2, "Versions", "org.apache.accumulo.core.iterators.VersioningIterator"));
 +    AccumuloInputFormat.addIterator(job, new IteratorSetting(3, "Count", "org.apache.accumulo.core.iterators.CountingIterator"));
 +
 +    List<IteratorSetting> list = AccumuloInputFormat.getIterators(job);
 +
 +    // Check the list size
 +    assertEquals(3, list.size());
 +
 +    // Walk the list and make sure our settings are correct
 +    IteratorSetting setting = list.get(0);
 +    assertEquals(1, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.WholeRowIterator", setting.getIteratorClass());
 +    assertEquals("WholeRow", setting.getName());
 +
 +    setting = list.get(1);
 +    assertEquals(2, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.VersioningIterator", setting.getIteratorClass());
 +    assertEquals("Versions", setting.getName());
 +
 +    setting = list.get(2);
 +    assertEquals(3, setting.getPriority());
 +    assertEquals("org.apache.accumulo.core.iterators.CountingIterator", setting.getIteratorClass());
 +    assertEquals("Count", setting.getName());
 +
 +  }
 +
 +  @Test
 +  public void testSetRegex() throws IOException {
 +    Job job = new Job();
 +
 +    String regex = ">\"*%<>\'\\";
 +
 +    IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
 +    RegExFilter.setRegexs(is, regex, null, null, null, false);
 +    AccumuloInputFormat.addIterator(job, is);
 +
 +    assertEquals(regex, AccumuloInputFormat.getIterators(job).get(0).getName());
 +  }
 +
 +  private static AssertionError e1 = null;
 +  private static AssertionError e2 = null;
 +
 +  private static class MRTester extends Configured implements Tool {
 +    private static class TestMapper extends Mapper<Key,Value,Key,Value> {
 +      Key key = null;
 +      int count = 0;
 +
 +      @Override
 +      protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
 +        try {
 +          if (key != null)
 +            assertEquals(key.getRow().toString(), new String(v.get()));
 +          assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +          assertEquals(new String(v.get()), String.format("%09x", count));
 +        } catch (AssertionError e) {
 +          e1 = e;
 +        }
 +        key = new Key(k);
 +        count++;
 +      }
 +
 +      @Override
 +      protected void cleanup(Context context) throws IOException, InterruptedException {
 +        try {
 +          assertEquals(100, count);
 +        } catch (AssertionError e) {
 +          e2 = e;
 +        }
 +      }
 +    }
 +
 +    @Override
 +    public int run(String[] args) throws Exception {
 +
 +      if (args.length != 5) {
 +        throw new IllegalArgumentException("Usage : " + MRTester.class.getName() + " <user> <pass> <table> <instanceName> <inputFormatClass>");
 +      }
 +
 +      String user = args[0];
 +      String pass = args[1];
 +      String table = args[2];
 +      String instanceName = args[3];
 +      String inputFormatClassName = args[4];
 +      @SuppressWarnings("unchecked")
 +      Class<? extends InputFormat<?,?>> inputFormatClass = (Class<? extends InputFormat<?,?>>) Class.forName(inputFormatClassName);
 +
 +      Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +      job.setJarByClass(this.getClass());
 +
 +      job.setInputFormatClass(inputFormatClass);
 +
 +      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(pass));
 +      AccumuloInputFormat.setInputTableName(job, table);
 +      AccumuloInputFormat.setMockInstance(job, instanceName);
 +
 +      job.setMapperClass(TestMapper.class);
 +      job.setMapOutputKeyClass(Key.class);
 +      job.setMapOutputValueClass(Value.class);
 +      job.setOutputFormatClass(NullOutputFormat.class);
 +
 +      job.setNumReduceTasks(0);
 +
 +      job.waitForCompletion(true);
 +
 +      return job.isSuccessful() ? 0 : 1;
 +    }
 +
 +    public static int main(String[] args) throws Exception {
 +      return ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args);
 +    }
 +  }
 +
 +  @Test
 +  public void testMap() throws Exception {
 +    final String INSTANCE_NAME = PREFIX + "_mapreduce_instance";
 +    final String TEST_TABLE_1 = PREFIX + "_mapreduce_table_1";
 +
 +    MockInstance mockInstance = new MockInstance(INSTANCE_NAME);
 +    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
 +    c.tableOperations().create(TEST_TABLE_1);
 +    BatchWriter bw = c.createBatchWriter(TEST_TABLE_1, new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    Assert.assertEquals(0, MRTester.main(new String[] {"root", "", TEST_TABLE_1, INSTANCE_NAME, AccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testCorrectRangeInputSplits() throws Exception {
 +    Job job = new Job(new Configuration(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +
 +    String username = "user", table = "table", instance = "instance";
 +    PasswordToken password = new PasswordToken("password");
 +    Authorizations auths = new Authorizations("foo");
 +    Collection<Pair<Text,Text>> fetchColumns = Collections.singleton(new Pair<Text,Text>(new Text("foo"), new Text("bar")));
 +    boolean isolated = true, localIters = true;
 +    Level level = Level.WARN;
 +
 +    Instance inst = new MockInstance(instance);
 +    Connector connector = inst.getConnector(username, password);
 +    connector.tableOperations().create(table);
 +
 +    AccumuloInputFormat.setConnectorInfo(job, username, password);
 +    AccumuloInputFormat.setInputTableName(job, table);
 +    AccumuloInputFormat.setScanAuthorizations(job, auths);
 +    AccumuloInputFormat.setMockInstance(job, instance);
 +    AccumuloInputFormat.setScanIsolation(job, isolated);
 +    AccumuloInputFormat.setLocalIterators(job, localIters);
 +    AccumuloInputFormat.fetchColumns(job, fetchColumns);
 +    AccumuloInputFormat.setLogLevel(job, level);
 +
 +    AccumuloInputFormat aif = new AccumuloInputFormat();
 +
 +    List<InputSplit> splits = aif.getSplits(job);
 +
 +    Assert.assertEquals(1, splits.size());
 +
 +    InputSplit split = splits.get(0);
 +
 +    Assert.assertEquals(RangeInputSplit.class, split.getClass());
 +
 +    RangeInputSplit risplit = (RangeInputSplit) split;
 +
 +    Assert.assertEquals(username, risplit.getPrincipal());
 +    Assert.assertEquals(table, risplit.getTable());
 +    Assert.assertEquals(password, risplit.getToken());
 +    Assert.assertEquals(auths, risplit.getAuths());
 +    Assert.assertEquals(instance, risplit.getInstanceName());
 +    Assert.assertEquals(isolated, risplit.isIsolatedScan());
 +    Assert.assertEquals(localIters, risplit.usesLocalIterators());
 +    Assert.assertEquals(fetchColumns, risplit.getFetchedColumns());
 +    Assert.assertEquals(level, risplit.getLogLevel());
 +  }
 +
 +  static class TestMapper extends Mapper<Key,Value,Key,Value> {
 +    Key key = null;
 +    int count = 0;
 +
 +    @Override
 +    protected void map(Key k, Value v, Context context) throws IOException, InterruptedException {
 +      if (key != null)
 +        assertEquals(key.getRow().toString(), new String(v.get()));
 +      assertEquals(k.getRow(), new Text(String.format("%09x", count + 1)));
 +      assertEquals(new String(v.get()), String.format("%09x", count));
 +      key = new Key(k);
 +      count++;
 +    }
 +  }
 +
 +  @Test
 +  public void testPartialInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialInputSplitUser";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    Assert.assertEquals(
 +        0,
 +        MRTester.main(new String[] {user, "", "testtable", "testPartialInputSplitDelegationToConfiguration",
 +            EmptySplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testPartialFailedInputSplitDelegationToConfiguration() throws Exception {
 +    String user = "testPartialFailedInputSplit";
 +    PasswordToken password = new PasswordToken("");
 +
 +    MockInstance mockInstance = new MockInstance("testPartialFailedInputSplitDelegationToConfiguration");
 +    Connector c = mockInstance.getConnector(user, password);
 +    c.tableOperations().create("testtable");
 +    BatchWriter bw = c.createBatchWriter("testtable", new BatchWriterConfig());
 +    for (int i = 0; i < 100; i++) {
 +      Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
 +      m.put(new Text(), new Text(), new Value(String.format("%09x", i).getBytes()));
 +      bw.addMutation(m);
 +    }
 +    bw.close();
 +
 +    // We should fail before we even get into the Mapper because we can't make the RecordReader
 +    Assert.assertEquals(
 +        1,
 +        MRTester.main(new String[] {user, "", "testtable", "testPartialFailedInputSplitDelegationToConfiguration",
 +            BadPasswordSplitsAccumuloInputFormat.class.getCanonicalName()}));
 +    assertNull(e1);
 +    assertNull(e2);
 +  }
 +
 +  @Test
 +  public void testEmptyColumnFamily() throws IOException {
 +    Job job = new Job();
 +    Set<Pair<Text,Text>> cols = new HashSet<Pair<Text,Text>>();
 +    cols.add(new Pair<Text,Text>(new Text(""), null));
 +    cols.add(new Pair<Text,Text>(new Text("foo"), new Text("bar")));
 +    cols.add(new Pair<Text,Text>(new Text(""), new Text("bar")));
 +    cols.add(new Pair<Text,Text>(new Text(""), new Text("")));
 +    cols.add(new Pair<Text,Text>(new Text("foo"), new Text("")));
 +    AccumuloInputFormat.fetchColumns(job, cols);
 +    Set<Pair<Text,Text>> setCols = AccumuloInputFormat.getFetchedColumns(job);
 +    assertEquals(cols, setCols);
 +  }
 +}
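
A minimal sketch of how a client wires AccumuloInputFormat into a MapReduce job, using only the configuration calls exercised by the tests above; the user, table, and instance names are placeholders:

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.iterators.user.WholeRowIterator;
    import org.apache.hadoop.mapreduce.Job;

    public class InputFormatSetupSketch {
      public static Job configure() throws Exception {
        Job job = new Job();
        // Credentials, source table, and (mock) instance, as in the tests above
        AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
        AccumuloInputFormat.setInputTableName(job, "table");
        AccumuloInputFormat.setMockInstance(job, "instance");
        // Server-side iterator applied to every scan the input format performs
        AccumuloInputFormat.addIterator(job, new IteratorSetting(50, "wholeRow", WholeRowIterator.class));
        job.setInputFormatClass(AccumuloInputFormat.class);
        return job;
      }
    }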

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java
----------------------------------------------------------------------
diff --cc core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java
index 0eb2653,0000000..6000817
mode 100644,000000..100644
--- a/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/util/shell/command/FormatterCommandTest.java
@@@ -1,212 -1,0 +1,199 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.util.shell.command;
 +
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.StringWriter;
 +import java.io.Writer;
 +import java.util.Iterator;
 +import java.util.Map.Entry;
 +
- import org.junit.Assert;
- 
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.TableExistsException;
 +import org.apache.accumulo.core.client.mock.MockShell;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.util.format.Formatter;
 +import org.apache.accumulo.core.util.shell.Shell;
 +import org.apache.commons.lang.StringUtils;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
++import org.junit.Assert;
 +import org.junit.Test;
 +
 +/**
 + * Uses the MockShell to test the shell output with Formatters
 + */
 +public class FormatterCommandTest {
 +  Writer writer = null;
 +  InputStream in = null;
 +  
 +  @Test
 +  public void test() throws IOException, AccumuloException, AccumuloSecurityException, TableExistsException, ClassNotFoundException {
 +    // Keep the Shell AUDIT log off the test output
 +    Logger.getLogger(Shell.class).setLevel(Level.WARN);
 +    
 +    final String[] args = new String[] {"--fake", "-u", "root", "-p", ""};
 +   
 +    final String[] commands = createCommands();
 +    
 +    in = MockShell.makeCommands(commands);
 +    writer = new StringWriter();
 +    
 +    final MockShell shell = new MockShell(in, writer);
 +    shell.config(args);
 +    
 +    // Can't call createtable in the shell with MockAccumulo
 +    shell.getConnector().tableOperations().create("test");
 +
 +    try {
 +      shell.start();
 +    } catch (Exception e) {
 +      Assert.fail("Exception while running commands: " + e.getMessage());
 +    } 
 +    
 +    shell.getReader().flushConsole();
 +    
 +    final String[] output = StringUtils.split(writer.toString(), '\n');
 +   
 +    boolean formatterOn = false;
 +    
 +    final String[] expectedDefault = new String[] {
 +        "row cf:cq []    1234abcd",
 +        "row cf1:cq1 []    9876fedc",
 +        "row2 cf:cq []    13579bdf",
 +        "row2 cf1:cq []    2468ace"
 +    };
 +    
 +    final String[] expectedFormatted = new String[] {
 +        "row cf:cq []    0x31 0x32 0x33 0x34 0x61 0x62 0x63 0x64",
 +        "row cf1:cq1 []    0x39 0x38 0x37 0x36 0x66 0x65 0x64 0x63",
 +        "row2 cf:cq []    0x31 0x33 0x35 0x37 0x39 0x62 0x64 0x66",
 +        "row2 cf1:cq []    0x32 0x34 0x36 0x38 0x61 0x63 0x65"
 +    };
 +    
 +    int outputIndex = 0;
 +    while (outputIndex < output.length) {
 +      final String line = output[outputIndex];
 +      
 +      if (line.startsWith("root@mock-instance")) {
 +        if (line.contains("formatter -t test -f org.apache.accumulo.core.util.shell.command.FormatterCommandTest$HexFormatter")) {
 +          formatterOn = true;
 +        }
 +       
 +        outputIndex++;
 +      } else if (line.startsWith("row")) {
 +        int expectedIndex = 0;
 +        String[] comparisonData;
 +        
 +        // Pick the type of data we expect (formatted or default)
 +        if (formatterOn) {
 +          comparisonData = expectedFormatted;
 +        } else {
 +          comparisonData = expectedDefault;
 +        }
 +        
 +        // Ensure each output is what we expected
 +        while (expectedIndex + outputIndex < output.length && expectedIndex < comparisonData.length) {
 +          Assert.assertEquals(comparisonData[expectedIndex].trim(), output[expectedIndex + outputIndex].trim());
 +          expectedIndex++;
 +        }
 +        
 +        outputIndex += expectedIndex;
 +      } else {
 +        // Neither a shell prompt nor data; advance so the loop always terminates
 +        outputIndex++;
 +      }
 +    }
 +  }
 +  
 +  private String[] createCommands() {
 +    return new String[] {
 +        "table test",
 +        "insert row cf cq 1234abcd",
 +        "insert row cf1 cq1 9876fedc",
 +        "insert row2 cf cq 13579bdf",
 +        "insert row2 cf1 cq 2468ace",
 +        "scan",
 +        "formatter -t test -f org.apache.accumulo.core.util.shell.command.FormatterCommandTest$HexFormatter",
 +        "scan"
 +    };
 +  }
 +  
 +  /**
 +   * <p>Simple <code>Formatter</code> that prints each character in the Value
 +   * as its hexadecimal byte value, automatically skipping characters in the value
 +   * which do not fall within the [0-9a-f] range.</p>
 +   * 
 +   * <p>Example: <code>'0'</code> will be displayed as <code>'0x30'</code></p>
 +   */
 +  public static class HexFormatter implements Formatter {
 +    private Iterator<Entry<Key, Value>> iter = null;
 +    private boolean printTs = false;
 +
 +    private final static String tab = "\t";
 +    private final static String newline = "\n";
 +    
 +    public HexFormatter() {}
 +    
-     /* (non-Javadoc)
-      * @see java.util.Iterator#hasNext()
-      */
 +    @Override
 +    public boolean hasNext() {
 +      return this.iter.hasNext();
 +    }
 +
-     /* (non-Javadoc)
-      * @see java.util.Iterator#next()
-      */
 +    @Override
 +    public String next() {
 +      final Entry<Key, Value> entry = iter.next();
 +      
 +      String key;
 +      
 +      // Observe the timestamps
 +      if (printTs) {
 +        key = entry.getKey().toString();
 +      } else {
 +        key = entry.getKey().toStringNoTime();
 +      }
 +      
 +      final Value v = entry.getValue();
 +      
 +      // Approximate how much space we'll need
 +      final StringBuilder sb = new StringBuilder(key.length() + v.getSize() * 5); 
 +      
 +      sb.append(key).append(tab);
 +      
 +      for (byte b : v.get()) {
 +        if ((b >= 48 && b <= 57) || (b >= 97 && b <= 102)) {
 +          sb.append(String.format("0x%x ", Integer.valueOf(b)));
 +        }
 +      }
 +      
 +      sb.append(newline);
 +      
 +      return sb.toString();
 +    }
 +
-     /* (non-Javadoc)
-      * @see java.util.Iterator#remove()
-      */
 +    @Override
 +    public void remove() {
 +    }
 +
-     /* (non-Javadoc)
-      * @see org.apache.accumulo.core.util.format.Formatter#initialize(java.lang.Iterable, boolean)
-      */
 +    @Override
 +    public void initialize(final Iterable<Entry<Key,Value>> scanner, final boolean printTimestamps) {
 +      this.iter = scanner.iterator();
 +      this.printTs = printTimestamps;
 +    }
 +  }
 +  
 +}
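
A self-contained sketch of the value-to-hex conversion HexFormatter performs, with the corrected [0-9a-f] range check; the class and method names here are illustrative only:

    public class HexSketch {
      // Emits "0x.." for bytes in the ASCII ranges '0'-'9' (48-57) and 'a'-'f' (97-102);
      // all other bytes are skipped.
      static String toHex(byte[] value) {
        StringBuilder sb = new StringBuilder(value.length * 5);
        for (byte b : value) {
          if ((b >= 48 && b <= 57) || (b >= 97 && b <= 102)) {
            sb.append(String.format("0x%x ", Integer.valueOf(b)));
          }
        }
        return sb.toString().trim();
      }

      public static void main(String[] args) {
        // Prints "0x31 0x32 0x33 0x34 0x61 0x62 0x63 0x64", matching the expected shell output above
        System.out.println(toHex("1234abcd".getBytes()));
      }
    }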

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
index 2ae82b4,0000000..a3bcf62
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
@@@ -1,234 -1,0 +1,229 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.client;
 +
 +import java.util.Arrays;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +import java.util.Random;
 +
 +import org.apache.accumulo.core.cli.BatchScannerOpts;
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.hadoop.io.Text;
 +import org.apache.log4j.Logger;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Internal class used to verify validity of data read.
 + */
 +class CountingVerifyingReceiver {
 +  private static final Logger log = Logger.getLogger(CountingVerifyingReceiver.class);
 +  
 +  long count = 0;
 +  int expectedValueSize = 0;
 +  HashMap<Text,Boolean> expectedRows;
 +  
 +  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
 +    this.expectedRows = expectedRows;
 +    this.expectedValueSize = expectedValueSize;
 +  }
 +  
 +  public void receive(Key key, Value value) {
 +    
 +    String row = key.getRow().toString();
 +    long rowid = Long.parseLong(row.split("_")[1]);
 +    
 +    byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
 +    
 +    if (!Arrays.equals(expectedValue, value.get())) {
 +      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue) + " got : " + new String(value.get()));
 +    }
 +    
 +    if (!expectedRows.containsKey(key.getRow())) {
 +      log.error("Got unexpected key " + key);
 +    } else {
 +      expectedRows.put(key.getRow(), true);
 +    }
 +    
 +    count++;
 +  }
 +}
 +
 +/**
 + * Simple example for reading random batches of data from Accumulo. See docs/examples/README.batch for instructions.
 + */
 +public class RandomBatchScanner {
 +  private static final Logger log = Logger.getLogger(RandomBatchScanner.class);
 +  
 +  /**
 +   * Generate a number of ranges, each covering a single random row.
 +   * 
 +   * @param num
 +   *          the number of ranges to generate
 +   * @param min
 +   *          the minimum row that will be generated
 +   * @param max
 +   *          the maximum row that will be generated
 +   * @param r
 +   *          a random number generator
 +   * @param ranges
 +   *          a set in which to store the generated ranges
 +   * @param expectedRows
 +   *          a map in which to store the rows covered by the ranges (initially mapped to false)
 +   */
 +  static void generateRandomQueries(int num, long min, long max, Random r, HashSet<Range> ranges, HashMap<Text,Boolean> expectedRows) {
 +    log.info(String.format("Generating %,d random queries...", num));
 +    while (ranges.size() < num) {
 +      long rowid = (Math.abs(r.nextLong()) % (max - min)) + min;
 +      
 +      Text row1 = new Text(String.format("row_%010d", rowid));
 +      
 +      Range range = new Range(new Text(row1));
 +      ranges.add(range);
 +      expectedRows.put(row1, false);
 +    }
 +    
 +    log.info("finished");
 +  }
 +  
 +  /**
 +   * Prints a count of the number of rows mapped to false.
 +   * 
 +   * @param expectedRows
 +   *          the map of rows to whether they were found
 +   * @return boolean indicating "were all the rows found?"
 +   */
 +  private static boolean checkAllRowsFound(HashMap<Text,Boolean> expectedRows) {
 +    int count = 0;
 +    boolean allFound = true;
 +    for (Entry<Text,Boolean> entry : expectedRows.entrySet())
 +      if (!entry.getValue())
 +        count++;
 +    
 +    if (count > 0) {
 +      log.warn("Did not find " + count + " rows");
 +      allFound = false;
 +    }
 +    return allFound;
 +  }
 +  
 +  /**
 +   * Generates a number of random queries, verifies that the key/value pairs returned were in the queried ranges and that the values were generated by
 +   * {@link RandomBatchWriter#createValue(long, int)}. Prints information about the results.
 +   * 
 +   * @param num
 +   *          the number of queries to generate
 +   * @param min
 +   *          the min row to query
 +   * @param max
 +   *          the max row to query
 +   * @param evs
 +   *          the expected size of the values
 +   * @param r
 +   *          a random number generator
 +   * @param tsbr
 +   *          a batch scanner
 +   * @return boolean indicating "did the queries go fine?"
 +   */
 +  static boolean doRandomQueries(int num, long min, long max, int evs, Random r, BatchScanner tsbr) {
 +    
 +    HashSet<Range> ranges = new HashSet<Range>(num);
 +    HashMap<Text,Boolean> expectedRows = new HashMap<Text,Boolean>();
 +    
 +    generateRandomQueries(num, min, max, r, ranges, expectedRows);
 +    
 +    tsbr.setRanges(ranges);
 +    
 +    CountingVerifyingReceiver receiver = new CountingVerifyingReceiver(expectedRows, evs);
 +    
 +    long t1 = System.currentTimeMillis();
 +    
 +    for (Entry<Key,Value> entry : tsbr) {
 +      receiver.receive(entry.getKey(), entry.getValue());
 +    }
 +    
 +    long t2 = System.currentTimeMillis();
 +    
 +    log.info(String.format("%6.2f lookups/sec %6.2f secs%n", num / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0)));
 +    log.info(String.format("num results : %,d%n", receiver.count));
 +    
 +    return checkAllRowsFound(expectedRows);
 +  }
 +  
 +  public static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names="--min", description="minimum row that will be generated")
 +    long min = 0;
 +    @Parameter(names="--max", description="maximum row that will be generated")
 +    long max = 0;
 +    @Parameter(names="--num", required=true, description="number of ranges to generate")
 +    int num = 0;
 +    @Parameter(names="--size", required=true, description="size of the value to write")
 +    int size = 0;
 +    @Parameter(names="--seed", description="seed for pseudo-random number generator")
 +    Long seed = null;
 +  }
 +  
 +  /**
 +   * Reads a specified number of entries from Accumulo using a {@link BatchScanner}. Runs the scans twice to compare the time for a fresh query against a
 +   * repeated query that benefits from cached metadata and already-established connections.
-    * 
-    * @param args
-    * @throws AccumuloException
-    * @throws AccumuloSecurityException
-    * @throws TableNotFoundException
 +   */
 +  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    Opts opts = new Opts();
 +    BatchScannerOpts bsOpts = new BatchScannerOpts();
 +    opts.parseArgs(RandomBatchScanner.class.getName(), args, bsOpts);
 +    
 +    Connector connector = opts.getConnector();
 +    BatchScanner batchReader = connector.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
 +    batchReader.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
 +    
 +    Random r;
 +    if (opts.seed == null)
 +      r = new Random();
 +    else
 +      r = new Random(opts.seed);
 +    
 +    // do one cold
 +    boolean status = doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
 +    
 +    System.gc();
 +    System.gc();
 +    System.gc();
 +    
 +    if (opts.seed == null)
 +      r = new Random();
 +    else
 +      r = new Random(opts.seed);
 +    
 +    // do one hot (connections already established, metadata table cached)
 +    status = status && doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
 +    
 +    batchReader.close();
 +    if (!status) {
 +      System.exit(1);
 +    }
 +  }
 +}
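
The BatchScanner pattern used above, reduced to a sketch that assumes an already-connected Connector; the table name, authorizations, and thread count are placeholders:

    import java.util.Collections;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    public class BatchScanSketch {
      public static void scanOneRow(Connector connector) throws TableNotFoundException {
        BatchScanner scanner = connector.createBatchScanner("table", Authorizations.EMPTY, 4);
        try {
          // Each Range covers a single row, as in generateRandomQueries above
          scanner.setRanges(Collections.singleton(new Range("row_0000000001")));
          for (Entry<Key,Value> entry : scanner) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
          }
        } finally {
          scanner.close();
        }
      }
    }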

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
index ce91da6,0000000..e76352a
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
@@@ -1,172 -1,0 +1,168 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.client;
 +
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.cli.BatchWriterOpts;
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.security.SecurityErrorCode;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.hadoop.io.Text;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Simple example for writing random data to Accumulo. See docs/examples/README.batch for instructions.
 + * 
 + * The rows of the entries will be randomly generated numbers between a specified min and max (prefixed by "row_"). The column families will be "foo" and column
 + * qualifiers will be "1". The values will be random byte arrays of a specified size.
 + */
 +public class RandomBatchWriter {
 +  
 +  /**
 +   * Creates a random byte array of specified size using the specified seed.
 +   * 
 +   * @param rowid
 +   *          the seed to use for the random number generator
 +   * @param dataSize
 +   *          the size of the array
 +   * @return a random byte array
 +   */
 +  public static byte[] createValue(long rowid, int dataSize) {
 +    Random r = new Random(rowid);
 +    byte value[] = new byte[dataSize];
 +    
 +    r.nextBytes(value);
 +    
 +    // transform to printable chars
 +    for (int j = 0; j < value.length; j++) {
 +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
 +    }
 +    
 +    return value;
 +  }
 +  
 +  /**
 +   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
 +   * 
 +   * @param rowid
 +   *          the row of the mutation
 +   * @param dataSize
 +   *          the size of the random value
 +   * @param visibility
 +   *          the visibility of the entry to insert
 +   * @return a mutation
 +   */
 +  public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
 +    Text row = new Text(String.format("row_%010d", rowid));
 +    
 +    Mutation m = new Mutation(row);
 +    
 +    // create a random value that is a function of the
 +    // row id for verification purposes
 +    byte value[] = createValue(rowid, dataSize);
 +    
 +    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
 +    
 +    return m;
 +  }
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names="--num", required=true)
 +    int num = 0;
 +    @Parameter(names="--min")
 +    long min = 0;
 +    @Parameter(names="--max")
 +    long max = Long.MAX_VALUE;
 +    @Parameter(names="--size", required=true, description="size of the value to write")
 +    int size = 0;
 +    @Parameter(names="--vis", converter=VisibilityConverter.class)
 +    ColumnVisibility visibility = new ColumnVisibility("");
 +    @Parameter(names="--seed", description="seed for pseudo-random number generator")
 +    Long seed = null;
 +  }
 + 
 +  /**
 +   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
-    * 
-    * @throws AccumuloException
-    * @throws AccumuloSecurityException
-    * @throws TableNotFoundException
 +   */
 +  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    Opts opts = new Opts();
 +    BatchWriterOpts bwOpts = new BatchWriterOpts();
 +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
 +    if ((opts.max - opts.min) < opts.num) {
 +      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. For example, you requested %d rows, but a min of %d and a max of %d only allows for %d rows.", opts.num, opts.min, opts.max, (opts.max - opts.min)));
 +      System.exit(1);
 +    }
 +    Random r;
 +    if (opts.seed == null)
 +      r = new Random();
 +    else {
 +      r = new Random(opts.seed);
 +    }
 +    Connector connector = opts.getConnector();
 +    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
 +    
 +    // reuse the ColumnVisibility object to improve performance
 +    ColumnVisibility cv = opts.visibility;
 +   
 +    // Generate num unique row ids in the given range
 +    HashSet<Long> rowids = new HashSet<Long>(opts.num);
 +    while (rowids.size() < opts.num) {
 +      rowids.add((Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
 +    }
 +    for (long rowid : rowids) {
 +      Mutation m = createMutation(rowid, opts.size, cv);
 +      bw.addMutation(m);
 +    }
 +    
 +    try {
 +      bw.close();
 +    } catch (MutationsRejectedException e) {
 +      if (e.getAuthorizationFailuresMap().size() > 0) {
 +        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
 +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailuresMap().entrySet()) {
 +          Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
 +          if (secCodes == null) {
 +            secCodes = new HashSet<SecurityErrorCode>();
 +            tables.put(ke.getKey().getTableId().toString(), secCodes);
 +          }
 +          secCodes.addAll(ke.getValue());
 +        }
 +        System.err.println("ERROR : Not authorized to write to tables : " + tables);
 +      }
 +      
 +      if (e.getConstraintViolationSummaries().size() > 0) {
 +        System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
 +      }
 +      System.exit(1);
 +    }
 +  }
 +}
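
The core write path from the example above as a sketch; the table name and cell contents are placeholders:

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class BatchWriteSketch {
      public static void writeOne(Connector connector) throws TableNotFoundException, MutationsRejectedException {
        BatchWriter writer = connector.createBatchWriter("table", new BatchWriterConfig());
        Mutation m = new Mutation(new Text("row_0000000001"));
        m.put(new Text("foo"), new Text("1"), new Value("abc".getBytes()));
        writer.addMutation(m);
        writer.close(); // flushes buffered mutations; MutationsRejectedException carries the failure details handled above
      }
    }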

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
index a56fdc0,0000000..c37c1c3
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
@@@ -1,73 -1,0 +1,68 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.client;
 +
 +import org.apache.accumulo.core.cli.BatchWriterOpts;
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Simple example for writing random data in sequential order to Accumulo. See docs/examples/README.batch for instructions.
 + */
 +public class SequentialBatchWriter {
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names="--start")
 +    long start = 0;
 +    @Parameter(names="--num", required=true)
 +    long num = 0;
 +    @Parameter(names="--size", required=true, description="size of the value to write")
 +    int valueSize = 0;
 +    @Parameter(names="--vis", converter=VisibilityConverter.class)
 +    ColumnVisibility vis = new ColumnVisibility();
 +  }
 +  
 +  /**
 +   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
 +   * The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
-    * 
-    * @throws AccumuloException
-    * @throws AccumuloSecurityException
-    * @throws TableNotFoundException
-    * @throws MutationsRejectedException
 +   */
 +  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
 +    Opts opts = new Opts();
 +    BatchWriterOpts bwOpts = new BatchWriterOpts();
 +    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
 +    Connector connector = opts.getConnector();
 +    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
 +    
 +    long end = opts.start + opts.num;
 +    
 +    for (long i = opts.start; i < end; i++) {
 +      Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
 +      bw.addMutation(m);
 +    }
 +    
 +    bw.close();
 +  }
 +}
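
For tuning a sequential load like this one, BatchWriterConfig exposes buffer, latency, and thread knobs; the values below are illustrative, not recommendations:

    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.client.BatchWriterConfig;

    public class WriterConfigSketch {
      public static BatchWriterConfig tuned() {
        return new BatchWriterConfig()
            .setMaxMemory(10 * 1024 * 1024)      // buffer up to ~10 MB of mutations client-side
            .setMaxLatency(30, TimeUnit.SECONDS) // flush buffered mutations at least every 30 seconds
            .setMaxWriteThreads(4);              // threads used to send data to tablet servers
      }
    }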

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
index 2947e0e,0000000..70a23e5
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
@@@ -1,77 -1,0 +1,72 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.client;
 +
 +import org.apache.accumulo.core.cli.ClientOnDefaultTable;
 +import org.apache.accumulo.core.cli.ScannerOpts;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.trace.TraceDump;
 +import org.apache.accumulo.core.trace.TraceDump.Printer;
 +import org.apache.hadoop.io.Text;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Example of using the TraceDump class to print a formatted view of a Trace.
 + *
 + */
 +public class TraceDumpExample {
 +	
 +	static class Opts extends ClientOnDefaultTable {
 +		public Opts() {
 +			super("trace");
 +		}
 +
 +		@Parameter(names = {"--traceid"}, description = "The hex string id of a given trace, for example 16cfbbd7beec4ae3")
 +		public String traceId = "";
 +	}
 +	
 +	public void dump(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +	
 +		if (opts.traceId.isEmpty()) {
 +			throw new IllegalArgumentException("--traceid option is required");
 +		}
 +		
 +		Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);
 +		scanner.setRange(new Range(new Text(opts.traceId)));
 +		TraceDump.printTrace(scanner, new Printer() {
- 			public void print(String line) {
++			@Override
++			public void print(String line) {
 +				System.out.println(line);
 +			}
 +		});
 +	}
 +	
- 	/**
- 	 * @param args
- 	 * @throws AccumuloSecurityException 
- 	 * @throws AccumuloException 
- 	 * @throws TableNotFoundException 
- 	 */
 +	public static void main(String[] args) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 +		TraceDumpExample traceDumpExample = new TraceDumpExample();
 +		Opts opts = new Opts();
 +		ScannerOpts scannerOpts = new ScannerOpts();
 +		opts.parseArgs(TraceDumpExample.class.getName(), args, scannerOpts);
 +
 +		traceDumpExample.dump(opts);
 +	}
 +
 +}
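
The Printer callback above writes each formatted trace line to stdout; a variation that collects the lines instead, assuming the same TraceDump.printTrace API shown in the diff:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.trace.TraceDump;
    import org.apache.accumulo.core.trace.TraceDump.Printer;

    public class TraceCollectSketch {
      public static List<String> collect(Scanner scanner) {
        final List<String> lines = new ArrayList<String>();
        TraceDump.printTrace(scanner, new Printer() {
          @Override
          public void print(String line) {
            lines.add(line);
          }
        });
        return lines;
      }
    }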

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
index 744efed,0000000..f11c739
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
@@@ -1,283 -1,0 +1,280 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.dirlist;
 +
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.TreeMap;
 +
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.RegExFilter;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.hadoop.io.Text;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory
 + * names. See docs/examples/README.dirlist for instructions.
 + */
 +public class QueryUtil {
 +  private Connector conn = null;
 +  private String tableName;
 +  private Authorizations auths;
 +  public static final Text DIR_COLF = new Text("dir");
 +  public static final Text FORWARD_PREFIX = new Text("f");
 +  public static final Text REVERSE_PREFIX = new Text("r");
 +  public static final Text INDEX_COLF = new Text("i");
 +  public static final Text COUNTS_COLQ = new Text("counts");
 +  
 +  public QueryUtil(Opts opts) throws AccumuloException,
 +      AccumuloSecurityException {
 +    conn = opts.getConnector();
 +    this.tableName = opts.tableName;
 +    this.auths = opts.auths;
 +  }
 +  
 +  /**
 +   * Calculates the depth of a path, i.e. the number of forward slashes in the path name.
 +   * 
 +   * @param path
 +   *          the full path of a file or directory
 +   * @return the depth of the path
 +   */
 +  public static int getDepth(String path) {
 +    int numSlashes = 0;
 +    int index = -1;
 +    while ((index = path.indexOf("/", index + 1)) >= 0)
 +      numSlashes++;
 +    return numSlashes;
 +  }
 +  
 +  /**
 +   * Given a path, construct an accumulo row prepended with the path's depth for the directory table.
 +   * 
 +   * @param path
 +   *          the full path of a file or directory
 +   * @return the accumulo row associated with this path
 +   */
 +  public static Text getRow(String path) {
 +    Text row = new Text(String.format("%03d", getDepth(path)));
 +    row.append(path.getBytes(), 0, path.length());
 +    return row;
 +  }
 +  
 +  /**
 +   * Given a path, construct an accumulo row prepended with the {@link #FORWARD_PREFIX} for the index table.
 +   * 
 +   * @param path
 +   *          the full path of a file or directory
 +   * @return the accumulo row associated with this path
 +   */
 +  public static Text getForwardIndex(String path) {
 +    String part = path.substring(path.lastIndexOf("/") + 1);
 +    if (part.length() == 0)
 +      return null;
 +    Text row = new Text(FORWARD_PREFIX);
 +    byte[] partBytes = part.getBytes();
 +    row.append(partBytes, 0, partBytes.length);
 +    return row;
 +  }
 +  
 +  /**
 +   * Given a path, construct an Accumulo row for the index table: the {@link #REVERSE_PREFIX} followed by the file or directory name reversed.
 +   * 
 +   * @param path
 +   *          the full path of a file or directory
 +   * @return the Accumulo row associated with this path, or null if the path has no final name component
 +   */
 +  public static Text getReverseIndex(String path) {
 +    String part = path.substring(path.lastIndexOf("/") + 1);
 +    if (part.length() == 0)
 +      return null;
 +    // size and index by byte length so multi-byte characters do not overflow the array
 +    byte[] bytes = part.getBytes();
 +    byte[] rev = new byte[bytes.length];
 +    int i = bytes.length - 1;
 +    for (byte b : bytes)
 +      rev[i--] = b;
 +    Text row = new Text(REVERSE_PREFIX);
 +    row.append(rev, 0, rev.length);
 +    return row;
 +  }
 +  
 +  /**
 +   * Returns either the {@link #DIR_COLF} string or the decoded long value of the column family, in both cases followed by a colon.
 +   * 
 +   * @param colf
 +   *          the column family
 +   */
 +  public static String getType(Text colf) {
 +    if (colf.equals(DIR_COLF))
 +      return colf.toString() + ":";
 +    return Long.toString(Ingest.encoder.decode(colf.getBytes())) + ":";
 +  }
 +  
 +  /**
 +   * Scans over the directory table and pulls out stat information about a path.
 +   * 
 +   * @param path
 +   *          the full path of a file or directory
 +   */
 +  public Map<String,String> getData(String path) throws TableNotFoundException {
 +    if (path.endsWith("/"))
 +      path = path.substring(0, path.length() - 1);
 +    Scanner scanner = conn.createScanner(tableName, auths);
 +    scanner.setRange(new Range(getRow(path)));
 +    Map<String,String> data = new TreeMap<String,String>();
 +    for (Entry<Key,Value> e : scanner) {
 +      String type = getType(e.getKey().getColumnFamily());
 +      data.put("fullname", e.getKey().getRow().toString().substring(3));
 +      data.put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
 +    }
 +    return data;
 +  }
 +  
 +  /**
 +   * Uses the directory table to list the contents of a directory.
 +   * 
 +   * @param path
 +   *          the full path of a directory
 +   */
 +  public Map<String,Map<String,String>> getDirList(String path) throws TableNotFoundException {
 +    if (!path.endsWith("/"))
 +      path = path + "/";
 +    Map<String,Map<String,String>> fim = new TreeMap<String,Map<String,String>>();
 +    Scanner scanner = conn.createScanner(tableName, auths);
 +    scanner.setRange(Range.prefix(getRow(path)));
 +    for (Entry<Key,Value> e : scanner) {
 +      String name = e.getKey().getRow().toString();
 +      name = name.substring(name.lastIndexOf("/") + 1);
 +      String type = getType(e.getKey().getColumnFamily());
 +      if (!fim.containsKey(name)) {
 +        fim.put(name, new TreeMap<String,String>());
 +        fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
 +      }
 +      fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
 +    }
 +    return fim;
 +  }
 +  
 +  /**
 +   * Scans over the index table for files or directories with a given name.
 +   * 
 +   * @param term
 +   *          the name of a file or directory to search for
 +   */
 +  public Iterable<Entry<Key,Value>> exactTermSearch(String term) throws Exception {
 +    System.out.println("executing exactTermSearch for " + term);
 +    Scanner scanner = conn.createScanner(tableName, auths);
 +    scanner.setRange(new Range(getForwardIndex(term)));
 +    return scanner;
 +  }
 +  
 +  /**
 +   * Scans over the index table for files or directories with a given name, prefix, or suffix (indicated by a wildcard '*' at the beginning or end of the term).
 +   * 
 +   * @param exp
 +   *          the name of a file or directory to search for, with an optional wildcard '*' at the beginning or end
 +   */
 +  public Iterable<Entry<Key,Value>> singleRestrictedWildCardSearch(String exp) throws Exception {
 +    if (exp.indexOf("/") >= 0)
 +      throw new Exception("this method only works with unqualified names");
 +    
 +    Scanner scanner = conn.createScanner(tableName, auths);
 +    if (exp.startsWith("*")) {
 +      System.out.println("executing beginning wildcard search for " + exp);
 +      exp = exp.substring(1);
 +      scanner.setRange(Range.prefix(getReverseIndex(exp)));
 +    } else if (exp.endsWith("*")) {
 +      System.out.println("executing ending wildcard search for " + exp);
 +      exp = exp.substring(0, exp.length() - 1);
 +      scanner.setRange(Range.prefix(getForwardIndex(exp)));
 +    } else if (exp.indexOf("*") >= 0) {
 +      throw new Exception("this method only works for beginning or ending wild cards");
 +    } else {
 +      return exactTermSearch(exp);
 +    }
 +    return scanner;
 +  }
 +  
 +  /**
 +   * Scans over the index table for files or directories with a given name that can contain a single wildcard '*' anywhere in the term.
 +   * 
 +   * @param exp
 +   *          the name of a file or directory to search for, with one optional wildcard '*'
 +   */
 +  public Iterable<Entry<Key,Value>> singleWildCardSearch(String exp) throws Exception {
 +    int starIndex = exp.indexOf("*");
 +    if (exp.indexOf("*", starIndex + 1) >= 0)
 +      throw new Exception("only one wild card for search");
 +    
 +    if (starIndex < 0) {
 +      return exactTermSearch(exp);
 +    } else if (starIndex == 0 || starIndex == exp.length() - 1) {
 +      return singleRestrictedWildCardSearch(exp);
 +    }
 +    
 +    String firstPart = exp.substring(0, starIndex);
 +    String lastPart = exp.substring(starIndex + 1);
 +    String regexString = ".*/" + exp.replace("*", "[^/]*");
 +    
 +    Scanner scanner = conn.createScanner(tableName, auths);
 +    if (firstPart.length() >= lastPart.length()) {
 +      System.out.println("executing middle wildcard search for " + regexString + " from entries starting with " + firstPart);
 +      scanner.setRange(Range.prefix(getForwardIndex(firstPart)));
 +    } else {
 +      System.out.println("executing middle wildcard search for " + regexString + " from entries ending with " + lastPart);
 +      scanner.setRange(Range.prefix(getReverseIndex(lastPart)));
 +    }
 +    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
 +    RegExFilter.setRegexs(regex, null, null, regexString, null, false);
 +    scanner.addScanIterator(regex);
 +    return scanner;
 +  }
 +  
 +  public static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names="--path", description="the directory to list")
 +    String path = "/";
 +    @Parameter(names="--search", description="find a file or directorys with the given name")
 +    boolean search = false;
 +  }
 +  
 +  /**
 +   * Lists the contents of a directory using the directory table, or searches for file or directory names (if the --search flag is included).
-    * 
-    * @param args
-    * @throws Exception
 +   */
 +  public static void main(String[] args) throws Exception {
 +    Opts opts = new Opts();
 +    opts.parseArgs(QueryUtil.class.getName(), args);
 +    QueryUtil q = new QueryUtil(opts);
 +    if (opts.search) {
 +      for (Entry<Key,Value> e : q.singleWildCardSearch(opts.path)) {
 +        System.out.println(e.getKey().getColumnQualifier());
 +      }
 +    } else {
 +      for (Entry<String,Map<String,String>> e : q.getDirList(opts.path).entrySet()) {
 +        System.out.println(e);
 +      }
 +    }
 +  }
 +}
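
The row and index encodings above are easiest to see with a concrete input. Below is a minimal illustrative sketch, assuming it sits in the same package as QueryUtil; the demo class name and the sample path are invented:

    import org.apache.hadoop.io.Text;

    // Illustration of QueryUtil's static helpers for one hypothetical path.
    public class QueryUtilRowDemo {
      public static void main(String[] args) {
        String path = "/local/user/file.txt";
        int depth = QueryUtil.getDepth(path);              // 3 (one per '/')
        Text row = QueryUtil.getRow(path);                 // 003/local/user/file.txt
        Text fwd = QueryUtil.getForwardIndex(path);        // ffile.txt
        Text rev = QueryUtil.getReverseIndex(path);        // rtxt.elif
        System.out.println(depth + " " + row + " " + fwd + " " + rev);
      }
    }

The zero-padded depth prefix keeps all entries at one directory level sorted together, which is why getDirList can serve a listing from a single contiguous prefix scan; likewise, the reversed index row is what lets the leading-wildcard case of singleRestrictedWildCardSearch run as a simple prefix scan.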

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
index cd6ca40,0000000..dc14512
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
@@@ -1,116 -1,0 +1,113 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.mapreduce;
 +
 +import java.io.IOException;
 +import java.util.SortedSet;
 +import java.util.TreeSet;
 +
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.LongWritable;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.Mapper;
 +import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +import org.apache.log4j.Logger;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Map-only job that ingests n-gram files from
 + * http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
 + */
 +public class NGramIngest extends Configured implements Tool {
 +  
 +  private static final Logger log = Logger.getLogger(NGramIngest.class);
 +  
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names = "--input", required=true)
 +    String inputDirectory;
 +  }
 +  static class NGramMapper extends Mapper<LongWritable, Text, Text, Mutation> {
 +
 +    @Override
 +    protected void map(LongWritable location, Text value, Context context) throws IOException, InterruptedException {
 +      String[] parts = value.toString().split("\\t");
 +      if (parts.length >= 4) {
 +        Mutation m = new Mutation(parts[0]);
 +        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])), new Value(parts[3].trim().getBytes()));
 +        context.write(null, m);
 +      }
 +    }
 +  }
 +
-   /**
-    * @param args
-    */
 +  @Override
 +  public int run(String[] args) throws Exception {
 +    Opts opts = new Opts();
 +    opts.parseArgs(getClass().getName(), args);
 +    
 +    Job job = new Job(getConf(), getClass().getSimpleName());
 +    job.setJarByClass(getClass());
 +    
 +    opts.setAccumuloConfigs(job);
 +    job.setInputFormatClass(TextInputFormat.class);
 +    job.setOutputFormatClass(AccumuloOutputFormat.class);
 +   
 +    job.setMapperClass(NGramMapper.class);
 +    job.setMapOutputKeyClass(Text.class);
 +    job.setMapOutputValueClass(Mutation.class);
 +    
 +    job.setNumReduceTasks(0);
 +    job.setSpeculativeExecution(false);
 +    
 +    
 +    if (!opts.getConnector().tableOperations().exists(opts.tableName)) {
 +      log.info("Creating table " + opts.tableName);
 +      opts.getConnector().tableOperations().create(opts.tableName);
 +      SortedSet<Text> splits = new TreeSet<Text>();
 +      String[] numbers = "1 2 3 4 5 6 7 8 9".split("\\s");
 +      String[] lower = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
 +      String[] upper = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split("\\s");
 +      for (String[] array : new String[][]{numbers, lower, upper}) {
 +        for (String s : array) {
 +          splits.add(new Text(s));
 +        }
 +      }
 +      opts.getConnector().tableOperations().addSplits(opts.tableName, splits);
 +    }
 +      
 +    TextInputFormat.addInputPath(job, new Path(opts.inputDirectory));
 +    job.waitForCompletion(true);
 +    return job.isSuccessful() ? 0 : 1;
 +  }
 +  
 +  public static void main(String[] args) throws Exception {
 +    int res = ToolRunner.run(CachedConfiguration.getInstance(), new NGramIngest(), args);
 +    if (res != 0)
 +      System.exit(res);
 +  }
 +  
 +}
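
Each line of the v2 n-gram dataset carries four tab-separated fields, matching what the mapper reads: the n-gram, a year, a match count, and a volume count. A small sketch of the mutation NGramMapper builds for one such line (the demo class name and sample values are invented):

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    // Illustration of the mapper's per-line transformation.
    public class NGramMutationDemo {
      public static void main(String[] args) {
        String line = "hello world\t1999\t4242\t1700";   // invented sample line
        String[] parts = line.split("\\t");
        Mutation m = new Mutation(parts[0]);             // row = the n-gram text
        // family = year, qualifier = zero-padded match count (so it sorts
        // numerically), value = volume count
        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])),
            new Value(parts[3].trim().getBytes()));
        System.out.println(m.getUpdates().size());       // 1 column update
      }
    }

Pre-splitting the new table on the single characters 1-9, a-z, and A-Z, as run() does, spreads the ingest load across tablets keyed by the n-gram's leading character.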

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
index d8eedef,0000000..669c76d
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
@@@ -1,130 -1,0 +1,129 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.mapreduce;
 +
 +import java.io.IOException;
 +import java.util.HashSet;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.util.CachedConfiguration;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.core.util.format.DefaultFormatter;
 +import org.apache.hadoop.conf.Configured;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.NullWritable;
 +import org.apache.hadoop.io.Text;
 +import org.apache.hadoop.mapreduce.Job;
 +import org.apache.hadoop.mapreduce.Mapper;
 +import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 +import org.apache.hadoop.util.Tool;
 +import org.apache.hadoop.util.ToolRunner;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Takes a table and outputs the specified columns to a set of part files on HDFS. Options are parsed from the command line; see the {@link Opts} class for
 + * the supported arguments (--output, --columns, plus the standard client options).
 + */
 +public class TableToFile extends Configured implements Tool {
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names = "--output", description = "output directory", required = true)
 +    String output;
 +    @Parameter(names = "--columns", description = "columns to extract, in cf:cq{,cf:cq,...} form")
 +    String columns = "";
 +  }
 +  
 +  /**
 +   * The Mapper class that, given a table entry (key and value), generates the corresponding output line.
 +   */
 +  public static class TTFMapper extends Mapper<Key,Value,NullWritable,Text> {
 +    @Override
 +    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
 +      final Key r = row;
 +      final Value v = data;
 +      Map.Entry<Key,Value> entry = new Map.Entry<Key,Value>() {
 +        @Override
 +        public Key getKey() {
 +          return r;
 +        }
 +        
 +        @Override
 +        public Value getValue() {
 +          return v;
 +        }
 +        
 +        @Override
 +        public Value setValue(Value value) {
 +          return null;
 +        }
 +      };
 +      context.write(NullWritable.get(), new Text(DefaultFormatter.formatEntry(entry, false)));
 +      context.setStatus("Outputed Value");
 +    }
 +  }
 +  
 +  @Override
 +  public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, AccumuloSecurityException {
 +    Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
 +    job.setJarByClass(this.getClass());
 +    Opts opts = new Opts();
 +    opts.parseArgs(getClass().getName(), args);
 +    
 +    job.setInputFormatClass(AccumuloInputFormat.class);
 +    opts.setAccumuloConfigs(job);
 +    
 +    HashSet<Pair<Text,Text>> columnsToFetch = new HashSet<Pair<Text,Text>>();
 +    for (String col : opts.columns.split(",")) {
 +      int idx = col.indexOf(":");
 +      Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
 +      Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
 +      if (cf.getLength() > 0)
 +        columnsToFetch.add(new Pair<Text,Text>(cf, cq));
 +    }
 +    if (!columnsToFetch.isEmpty())
 +      AccumuloInputFormat.fetchColumns(job, columnsToFetch);
 +    
 +    job.setMapperClass(TTFMapper.class);
 +    job.setMapOutputKeyClass(NullWritable.class);
 +    job.setMapOutputValueClass(Text.class);
 +    
 +    job.setNumReduceTasks(0);
 +    
 +    job.setOutputFormatClass(TextOutputFormat.class);
 +    TextOutputFormat.setOutputPath(job, new Path(opts.output));
 +    
 +    job.waitForCompletion(true);
 +    return job.isSuccessful() ? 0 : 1;
 +  }
 +  
 +  /**
 +   * 
 +   * @param args
 +   *          command-line arguments; see the {@link Opts} class for the supported options
-    * @throws Exception
 +   */
 +  public static void main(String[] args) throws Exception {
 +    int res = ToolRunner.run(CachedConfiguration.getInstance(), new TableToFile(), args);
 +    if (res != 0)
 +      System.exit(res);
 +  }
 +}
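
The --columns option takes a comma-separated list in which each element is either a bare column family or a family:qualifier pair. A quick sketch of how run() interprets one such string (the demo class name and sample value are invented):

    import org.apache.accumulo.core.util.Pair;
    import org.apache.hadoop.io.Text;

    // Illustration of the cf:cq parsing loop from run().
    public class ColumnArgDemo {
      public static void main(String[] args) {
        String columns = "stat:size,dir";                // invented sample value
        for (String col : columns.split(",")) {
          int idx = col.indexOf(":");
          Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
          Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
          // "stat:size" fetches one qualifier; bare "dir" (cq == null)
          // fetches the entire column family.
          if (cf.getLength() > 0)
            System.out.println(new Pair<Text,Text>(cf, cq));
        }
      }
    }

When the list is empty, the loop yields nothing and fetchColumns is never called, so the job exports every column of the table.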

http://git-wip-us.apache.org/repos/asf/accumulo/blob/92613388/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
index f6d610e,0000000..d98d78b
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
@@@ -1,78 -1,0 +1,75 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.shard;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.List;
 +import java.util.Map.Entry;
 +import java.util.concurrent.TimeUnit;
 +
 +import org.apache.accumulo.core.cli.BatchScannerOpts;
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.BatchScanner;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.IteratorSetting;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Range;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 +import org.apache.hadoop.io.Text;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
 + * 
 + * See docs/examples/README.shard for instructions.
 + */
 +public class Query {
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(description=" term { <term> ... }")
 +    List<String> terms = new ArrayList<String>();
 +  }
 +  
-   /**
-    * @param args
-    */
 +  public static void main(String[] args) throws Exception {
 +    Opts opts = new Opts();
 +    BatchScannerOpts bsOpts = new BatchScannerOpts();
 +    opts.parseArgs(Query.class.getName(), args, bsOpts);
 +    
 +    Connector conn = opts.getConnector();
 +    BatchScanner bs = conn.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
 +    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
 +    
 +    Text[] columns = new Text[opts.terms.size()];
 +    int i = 0;
 +    for (String term : opts.terms) {
 +      columns[i++] = new Text(term);
 +    }
 +    IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
 +    IntersectingIterator.setColumnFamilies(ii, columns);
 +    bs.addScanIterator(ii);
 +    bs.setRanges(Collections.singleton(new Range()));
 +    for (Entry<Key,Value> entry : bs) {
 +      System.out.println("  " + entry.getKey().getColumnQualifier());
 +    }
 +    
 +  }
 +  
 +}
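
Query relies on the document-sharded layout produced by Index: each row is a shard id, the column family is a term, and the column qualifier is the id of a document containing that term. A minimal sketch of the iterator setup, using two invented terms (the demo class name is also invented):

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.IntersectingIterator;
    import org.apache.hadoop.io.Text;

    // Illustration of configuring the intersection of two terms.
    public class ShardQueryDemo {
      public static void main(String[] args) {
        Text[] terms = {new Text("apache"), new Text("hadoop")};
        IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
        IntersectingIterator.setColumnFamilies(ii, terms);
        // Attached to a BatchScanner over the whole table (a single empty
        // Range), the iterator emits one entry per document that contains
        // every term; the matching document id is the column qualifier,
        // which is what Query's main() prints.
      }
    }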