Posted to commits@accumulo.apache.org by mw...@apache.org on 2016/12/09 17:16:47 UTC

[1/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Repository: accumulo
Updated Branches:
  refs/heads/master 13201a814 -> 8e0f19a1c


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java b/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
deleted file mode 100644
index 5b956d7..0000000
--- a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputStreamIT.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.examples.simple.filedata;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.accumulo.examples.simple.filedata.ChunkInputStream;
-import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ChunkInputStreamIT extends AccumuloClusterHarness {
-
-  private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
-
-  private Connector conn;
-  private String tableName;
-  private List<Entry<Key,Value>> data;
-  private List<Entry<Key,Value>> baddata;
-  private List<Entry<Key,Value>> multidata;
-
-  @Before
-  public void setupInstance() throws Exception {
-    conn = getConnector();
-    tableName = getUniqueNames(1)[0];
-    conn.securityOperations().changeUserAuthorizations(conn.whoami(), AUTHS);
-  }
-
-  @Before
-  public void setupData() {
-    data = new ArrayList<>();
-    addData(data, "a", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "a", "refs", "id\0name", "A&B", "name");
-    addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "a", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "b", "refs", "id\0name", "A&B", "name");
-    addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "~chunk", 100, 1, "B&C", "");
-    addData(data, "b", "~chunk", 100, 1, "D", "");
-    addData(data, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 1, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 2, "A&B", "");
-    addData(data, "d", "~chunk", 100, 0, "A&B", "");
-    addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "e", "~chunk", 100, 1, "A&B", "");
-    baddata = new ArrayList<>();
-    addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 2, "C", "");
-    addData(baddata, "c", "~chunk", 100, 0, "D", "asdfjkl;");
-    addData(baddata, "c", "~chunk", 100, 2, "E", "");
-    addData(baddata, "d", "~chunk", 100, 0, "F", "asdfjkl;");
-    addData(baddata, "d", "~chunk", 100, 1, "G", "");
-    addData(baddata, "d", "~zzzzz", "colq", "H", "");
-    addData(baddata, "e", "~chunk", 100, 0, "I", "asdfjkl;");
-    addData(baddata, "e", "~chunk", 100, 1, "J", "");
-    addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
-    addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
-    addData(baddata, "g", "~chunk", 100, 0, "L", "");
-    multidata = new ArrayList<>();
-    addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
-    addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 1, "B&C", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
-  }
-
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
-  }
-
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
-    Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
-    chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
-  }
-
-  @Test
-  public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    Scanner scan = conn.createScanner(tableName, AUTHS);
-
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scan.iterator());
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 10);
-    assertEquals(new String(b, 0, read), "qwertyuiop");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 16);
-    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    assertFalse(pi.hasNext());
-  }
-
-}
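
For reference, a minimal sketch (not part of this commit) of the read pattern the removed ChunkInputStreamIT exercised, using the examples module's ChunkInputStream API exactly as it appears in the deleted test above; the connector, table name, and authorizations are placeholders.

import java.io.ByteArrayOutputStream;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.PeekingIterator;
import org.apache.accumulo.examples.simple.filedata.ChunkInputStream;

public class ChunkReadSketch {
  // Reassembles each chunked file stored in a filedata table and prints the file's
  // bytes together with the column visibilities seen while reading it, mirroring
  // the assertions in the removed test (read() returns -1 at the end of each file).
  public static void readAll(Connector conn, String tableName) throws Exception {
    Scanner scan = conn.createScanner(tableName, new Authorizations("A", "B", "C", "D"));
    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scan.iterator());
    ChunkInputStream cis = new ChunkInputStream();
    byte[] buf = new byte[20];
    while (pi.hasNext()) {
      cis.setSource(pi); // position the stream on the next file's ~chunk entries
      ByteArrayOutputStream file = new ByteArrayOutputStream();
      int read;
      while ((read = cis.read(buf)) != -1) {
        file.write(buf, 0, read); // accumulate this file's bytes
      }
      System.out.println(file.toString() + " " + cis.getVisibilities()); // e.g. "asdfjkl; [A&B]"
      cis.close();
    }
  }
}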

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
index 27d84de..181ac08 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@ -37,8 +37,8 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.test.constraints.AlphaNumKeyConstraint;
+import org.apache.accumulo.test.constraints.NumericValueConstraint;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
@@ -295,8 +295,8 @@ public class ConstraintIT extends AccumuloClusterHarness {
 
       HashMap<String,Integer> expected = new HashMap<>();
 
-      expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
-      expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
+      expected.put("org.apache.accumulo.test.constraints.NumericValueConstraint", numericErrors);
+      expected.put("org.apache.accumulo.test.constraints.AlphaNumKeyConstraint", 1);
 
       for (ConstraintViolationSummary cvs : cvsl) {
         if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
deleted file mode 100644
index a69f4a5..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.AgeOffFilter;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.client.Flush;
-import org.apache.accumulo.examples.simple.client.RandomBatchScanner;
-import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
-import org.apache.accumulo.examples.simple.client.ReadWriteExample;
-import org.apache.accumulo.examples.simple.client.RowOperations;
-import org.apache.accumulo.examples.simple.client.SequentialBatchWriter;
-import org.apache.accumulo.examples.simple.client.TraceDumpExample;
-import org.apache.accumulo.examples.simple.client.TracingExample;
-import org.apache.accumulo.examples.simple.combiner.StatsCombiner;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
-import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter;
-import org.apache.accumulo.examples.simple.helloworld.ReadData;
-import org.apache.accumulo.examples.simple.isolation.InterferenceTest;
-import org.apache.accumulo.examples.simple.mapreduce.RegexExample;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.examples.simple.mapreduce.TableToFile;
-import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest;
-import org.apache.accumulo.examples.simple.mapreduce.WordCount;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest;
-import org.apache.accumulo.examples.simple.shard.ContinuousQuery;
-import org.apache.accumulo.examples.simple.shard.Index;
-import org.apache.accumulo.examples.simple.shard.Query;
-import org.apache.accumulo.examples.simple.shard.Reverse;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
-import org.apache.accumulo.test.categories.SunnyDayTests;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-@Category({StandaloneCapableClusterTests.class, SunnyDayTests.class})
-public class ExamplesIT extends AccumuloClusterHarness {
-  private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
-  private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
-  private static final BatchWriterConfig bwc = new BatchWriterConfig();
-  private static final String visibility = "A|B";
-  private static final String auths = "A,B";
-
-  Connector c;
-  String instance;
-  String keepers;
-  String user;
-  String passwd;
-  String keytab;
-  BatchWriter bw;
-  IteratorSetting is;
-  String dir;
-  FileSystem fs;
-  Authorizations origAuths;
-  boolean saslEnabled;
-
-  @Override
-  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
-    // 128MB * 3
-    cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
-  }
-
-  @Before
-  public void getClusterInfo() throws Exception {
-    c = getConnector();
-    user = getAdminPrincipal();
-    AuthenticationToken token = getAdminToken();
-    if (token instanceof KerberosToken) {
-      keytab = getAdminUser().getKeytab().getAbsolutePath();
-      saslEnabled = true;
-    } else if (token instanceof PasswordToken) {
-      passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
-      saslEnabled = false;
-    } else {
-      Assert.fail("Unknown token type: " + token);
-    }
-    fs = getCluster().getFileSystem();
-    instance = c.getInstance().getInstanceName();
-    keepers = c.getInstance().getZooKeepers();
-    dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
-
-    origAuths = c.securityOperations().getUserAuthorizations(user);
-    c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
-  }
-
-  @After
-  public void resetAuths() throws Exception {
-    if (null != origAuths) {
-      getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
-    }
-  }
-
-  @Override
-  public int defaultTimeoutSeconds() {
-    return 6 * 60;
-  }
-
-  @Test
-  public void testTrace() throws Exception {
-    Process trace = null;
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      trace = impl.exec(TraceServer.class);
-      while (!c.tableOperations().exists("trace"))
-        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-    }
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"};
-    }
-    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
-    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
-    String result = pair.getValue();
-    Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
-    Matcher matcher = pattern.matcher(result);
-    int count = 0;
-    while (matcher.find()) {
-      if (saslEnabled) {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)};
-      } else {
-        args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)};
-      }
-      pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
-      count++;
-    }
-    assertTrue(count > 0);
-    assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost"));
-    if (ClusterType.MINI == getClusterType() && null != trace) {
-      trace.destroy();
-    }
-  }
-
-  @Test
-  public void testClasspath() throws Exception {
-    Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
-    assertEquals(0, entry.getKey().intValue());
-    String result = entry.getValue();
-    int level1 = result.indexOf("Level 1");
-    int level2 = result.indexOf("Level 2");
-    int level3 = result.indexOf("Level 3");
-    int level4 = result.indexOf("Level 4");
-    assertTrue("Level 1 classloader not present.", level1 >= 0);
-    assertTrue("Level 2 classloader not present.", level2 > 0);
-    assertTrue("Level 3 classloader not present.", level3 > 0);
-    assertTrue("Level 4 classloader not present.", level4 > 0);
-    assertTrue(level1 < level2);
-    assertTrue(level2 < level3);
-    assertTrue(level3 < level4);
-  }
-
-  @Test
-  public void testDirList() throws Exception {
-    String[] names = getUniqueNames(3);
-    String dirTable = names[0], indexTable = names[1], dataTable = names[2];
-    String[] args;
-    String dirListDirectory;
-    switch (getClusterType()) {
-      case MINI:
-        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
-        break;
-      case STANDALONE:
-        dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    assumeTrue(new File(dirListDirectory).exists());
-    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
-          dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
-    }
-    Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
-    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
-
-    String expectedFile;
-    switch (getClusterType()) {
-      case MINI:
-        // Should be present in a minicluster dir
-        expectedFile = "accumulo-site.xml";
-        break;
-      case STANDALONE:
-        // Should be in place on standalone installs (not having to follow symlinks)
-        expectedFile = "LICENSE";
-        break;
-      default:
-        throw new RuntimeException("Unknown cluster type");
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
-          expectedFile};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
-    }
-    entry = getClusterControl().execWithStdout(QueryUtil.class, args);
-    if (ClusterType.MINI == getClusterType()) {
-      MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
-      for (LogWriter writer : impl.getLogWriters()) {
-        writer.flush();
-      }
-    }
-
-    log.info("result " + entry.getValue());
-    assertEquals(0, entry.getKey().intValue());
-    assertTrue(entry.getValue().contains(expectedFile));
-  }
-
-  @Test
-  public void testAgeoffFilter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, AgeOffFilter.class);
-    AgeOffFilter.setTTL(is, 1000L);
-    c.tableOperations().attachIterator(tableName, is);
-    sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // let zookeeper updates propagate.
-    bw = c.createBatchWriter(tableName, bwc);
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "c");
-    bw.addMutation(m);
-    bw.close();
-    sleepUninterruptibly(1, TimeUnit.SECONDS);
-    assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
-  }
-
-  @Test
-  public void testStatsCombiner() throws Exception {
-    String table = getUniqueNames(1)[0];
-    c.tableOperations().create(table);
-    is = new IteratorSetting(10, StatsCombiner.class);
-    StatsCombiner.setCombineAllColumns(is, true);
-
-    c.tableOperations().attachIterator(table, is);
-    bw = c.createBatchWriter(table, bwc);
-    // Write two mutations otherwise the NativeMap would dedupe them into a single update
-    Mutation m = new Mutation("foo");
-    m.put("a", "b", "1");
-    bw.addMutation(m);
-    m = new Mutation("foo");
-    m.put("a", "b", "3");
-    bw.addMutation(m);
-    bw.flush();
-
-    Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    Entry<Key,Value> e = iter.next();
-    assertEquals("Results ", "1,3,4,2", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-
-    m = new Mutation("foo");
-    m.put("a", "b", "0,20,20,2");
-    bw.addMutation(m);
-    bw.close();
-
-    iter = c.createScanner(table, Authorizations.EMPTY).iterator();
-    assertTrue("Iterator had no results", iter.hasNext());
-    e = iter.next();
-    assertEquals("Results ", "0,20,24,4", e.getValue().toString());
-    assertFalse("Iterator had additional results", iter.hasNext());
-  }
-
-  @Test
-  public void testBloomFilters() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max",
-          "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    } else {
-      args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000",
-          "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
-    }
-    goodExec(RandomBatchWriter.class, args);
-    c.tableOperations().flush(tableName, null, null, true);
-    long diff = 0, diff2 = 0;
-    // try the speed test a couple times in case the system is loaded with other tests
-    for (int i = 0; i < 2; i++) {
-      long now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
-      goodExec(RandomBatchScanner.class, args);
-      diff = System.currentTimeMillis() - now;
-      now = System.currentTimeMillis();
-      if (saslEnabled) {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
-            "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
-      } else {
-        args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
-            "--size", "50", "--scanThreads", "4", "-t", tableName};
-      }
-      int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
-      assertEquals(1, retCode);
-      diff2 = System.currentTimeMillis() - now;
-      if (diff2 < diff)
-        break;
-    }
-    assertTrue(diff2 < diff);
-  }
-
-  @Test
-  public void testShardedIndex() throws Exception {
-    File src = new File(System.getProperty("user.dir") + "/src");
-    assumeTrue(src.exists());
-    String[] names = getUniqueNames(3);
-    final String shard = names[0], index = names[1];
-    c.tableOperations().create(shard);
-    c.tableOperations().create(index);
-    bw = c.createBatchWriter(shard, bwc);
-    Index.index(30, src, "\\W+", bw);
-    bw.close();
-    BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
-    List<String> found = Query.query(bs, Arrays.asList("foo", "bar"), null);
-    bs.close();
-    // should find ourselves
-    boolean thisFile = false;
-    for (String file : found) {
-      if (file.endsWith("/ExamplesIT.java"))
-        thisFile = true;
-    }
-    assertTrue(thisFile);
-
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd};
-    }
-    // create a reverse index
-    goodExec(Reverse.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5",
-          "--count", "1000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count",
-          "1000"};
-    }
-    // run some queries
-    goodExec(ContinuousQuery.class, args);
-  }
-
-  @Test
-  public void testMaxMutationConstraint() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
-    TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 1;
-    opts.cols = 1000;
-    opts.setTableName(tableName);
-    if (saslEnabled) {
-      opts.updateKerberosCredentials(cluster.getClientConfig());
-    } else {
-      opts.setPrincipal(getAdminPrincipal());
-    }
-    try {
-      TestIngest.ingest(c, opts, bwOpts);
-    } catch (MutationsRejectedException ex) {
-      assertEquals(1, ex.getConstraintViolationSummaries().size());
-    }
-  }
-
-  @Test
-  public void testBulkIngest() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    FileSystem fs = getFileSystem();
-    Path p = new Path(dir, "tmp");
-    if (fs.exists(p)) {
-      fs.delete(p, true);
-    }
-    goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
-
-    List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName}));
-    if (saslEnabled) {
-      commonArgs.add("--keytab");
-      commonArgs.add(keytab);
-    } else {
-      commonArgs.add("-p");
-      commonArgs.add(passwd);
-    }
-
-    List<String> args = new ArrayList<>(commonArgs);
-    goodExec(SetupTable.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"}));
-    goodExec(BulkIngestExample.class, args.toArray(new String[0]));
-
-    args = new ArrayList<>(commonArgs);
-    args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"}));
-    goodExec(VerifyIngest.class, args.toArray(new String[0]));
-  }
-
-  @Test
-  public void testTeraSortAndRead() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "--keytab", keytab, "--splits", "4"};
-    } else {
-      args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
-          "-u", user, "-p", passwd, "--splits", "4"};
-    }
-    goodExec(TeraSortIngest.class, args);
-    Path output = new Path(dir, "tmp/nines");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output",
-          output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
-    }
-    goodExec(RegexExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"};
-    }
-    goodExec(RowHash.class, args);
-    output = new Path(dir, "tmp/tableFile");
-    if (fs.exists(output)) {
-      fs.delete(output, true);
-    }
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--output", output.toString()};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()};
-    }
-    goodExec(TableToFile.class, args);
-  }
-
-  @Test
-  public void testWordCount() throws Exception {
-    // TODO Figure out a way to run M/R with Kerberos
-    assumeTrue(getAdminToken() instanceof PasswordToken);
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    is = new IteratorSetting(10, SummingCombiner.class);
-    SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
-    SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
-    c.tableOperations().attachIterator(tableName, is);
-    Path readme = new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md");
-    if (!new File(readme.toString()).exists()) {
-      log.info("Not running test: README.md does not exist)");
-      return;
-    }
-    fs.copyFromLocalFile(readme, new Path(dir + "/tmp/wc/README.md"));
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
-    }
-    goodExec(WordCount.class, args);
-  }
-
-  @Test
-  public void testInsertWithBatchWriterAndReadData() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
-    }
-    goodExec(InsertWithBatchWriter.class, args);
-    goodExec(ReadData.class, args);
-  }
-
-  @Test
-  public void testIsolatedScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testScansWithInterference() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
-    }
-    goodExec(InterferenceTest.class, args);
-  }
-
-  @Test
-  public void testRowOperations() throws Exception {
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd};
-    }
-    goodExec(RowOperations.class, args);
-  }
-
-  @Test
-  public void testBatchWriter() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
-          "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(SequentialBatchWriter.class, args);
-
-  }
-
-  @Test
-  public void testReadWriteAndDelete() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c",
-          "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"};
-    }
-    goodExec(ReadWriteExample.class, args);
-
-  }
-
-  @Test
-  public void testRandomBatchesAndFlush() throws Exception {
-    String tableName = getUniqueNames(1)[0];
-    c.tableOperations().create(tableName);
-    String[] args;
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max",
-          "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000",
-          "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
-    }
-    goodExec(RandomBatchWriter.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max",
-          "100000", "--size", "100", "--scanThreads", "4", "--auths", auths};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000",
-          "--size", "100", "--scanThreads", "4", "--auths", auths};
-    }
-    goodExec(RandomBatchScanner.class, args);
-
-    if (saslEnabled) {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName};
-    } else {
-      args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName};
-    }
-    goodExec(Flush.class, args);
-  }
-
-  private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
-    Entry<Integer,String> pair;
-    if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
-      StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
-      pair = control.execMapreduceWithStdout(theClass, args);
-    } else {
-      // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message.
-      pair = getClusterControl().execWithStdout(theClass, args);
-    }
-    Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
-  }
-}
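
The removed ExamplesIT built its command line the same way in almost every test: the common instance/ZooKeeper/user flags followed by either a keytab (when SASL is enabled) or a password, then the example-specific options. Below is a minimal sketch of that pattern as a reusable helper, using only flags that appear in the deleted test above; the class and method names are illustrative, not from the codebase.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ExampleArgsSketch {
  // Builds "-i <instance> -z <keepers> -u <user>" plus either "--keytab <keytab>"
  // (Kerberos/SASL) or "-p <password>", followed by any example-specific options;
  // this is the argument shape every goodExec() call in the deleted test passed.
  public static String[] connectionArgs(String instance, String keepers, String user,
      boolean saslEnabled, String keytab, String passwd, String... extra) {
    List<String> args = new ArrayList<>(Arrays.asList("-i", instance, "-z", keepers, "-u", user));
    if (saslEnabled) {
      args.add("--keytab");
      args.add(keytab);
    } else {
      args.add("-p");
      args.add(passwd);
    }
    args.addAll(Arrays.asList(extra));
    return args.toArray(new String[0]);
  }
}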

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
index 3797e5b..c690baf 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -38,7 +38,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.accumulo.test.mapreduce.RowHash;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
new file mode 100644
index 0000000..48b5b33
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/RowHash.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.mapreduce;
+
+import java.io.IOException;
+import java.util.Base64;
+import java.util.Collections;
+
+import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.beust.jcommander.Parameter;
+
+public class RowHash extends Configured implements Tool {
+  /**
+   * The Mapper class that, for each input entry, emits a mutation carrying the Base64-encoded MD5 hash of the entry's value.
+   */
+  public static class HashDataMapper extends Mapper<Key,Value,Text,Mutation> {
+    @Override
+    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
+      Mutation m = new Mutation(row.getRow());
+      m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"), new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
+      context.write(null, m);
+      context.progress();
+    }
+
+    @Override
+    public void setup(Context job) {}
+  }
+
+  private static class Opts extends MapReduceClientOnRequiredTable {
+    @Parameter(names = "--column", required = true)
+    String column;
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    Job job = Job.getInstance(getConf());
+    job.setJobName(this.getClass().getName());
+    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(RowHash.class.getName(), args);
+    job.setInputFormatClass(AccumuloInputFormat.class);
+    opts.setAccumuloConfigs(job);
+
+    String col = opts.column;
+    int idx = col.indexOf(":");
+    Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
+    Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
+    if (cf.getLength() > 0)
+      AccumuloInputFormat.fetchColumns(job, Collections.singleton(new Pair<>(cf, cq)));
+
+    job.setMapperClass(HashDataMapper.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Mutation.class);
+
+    job.setNumReduceTasks(0);
+
+    job.setOutputFormatClass(AccumuloOutputFormat.class);
+
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+
+  public static void main(String[] args) throws Exception {
+    ToolRunner.run(new Configuration(), new RowHash(), args);
+  }
+}
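
A hedged usage sketch (not part of the commit) showing how the relocated RowHash tool can be launched from Java, passing the same flags the removed ExamplesIT#testTeraSortAndRead used; the instance, ZooKeeper quorum, credentials, and table name below are placeholders.

import org.apache.accumulo.test.mapreduce.RowHash;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class RowHashLauncher {
  public static void main(String[] cmdLine) throws Exception {
    // Runs the map-only job above: for every fetched cell it writes the Base64-encoded
    // MD5 of the cell's value back into the same table under cf-HASHTYPE:cq-MD5BASE64.
    ToolRunner.run(new Configuration(), new RowHash(), new String[] {
        "-i", "myInstance",           // placeholder instance name
        "-z", "zkhost:2181",          // placeholder ZooKeeper quorum
        "-u", "root", "-p", "secret", // placeholder credentials
        "-t", "terasort_table",       // placeholder table to read from and write back into
        "--column", "c:"});           // same --column value the removed ExamplesIT passed
  }
}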

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java b/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
new file mode 100644
index 0000000..28762e0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/mapreduce/TeraSortIngest.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import com.beust.jcommander.Parameter;
+
+/**
+ * Generates the *almost* official terasort input data set (see below). The user specifies the number of rows and the destination table, and this class runs a
+ * map/reduce program to generate the data. The format of the data is:
+ * <ul>
+ * <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
+ * <li>The keys are random characters from the set ' ' .. '~'.
+ * <li>The rowid is the right-justified row id as an int.
+ * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
+ * </ul>
+ *
+ * This TeraSort is slightly modified to allow variable key and value lengths; the row length itself is fixed. To generate a terabyte of data the same way
+ * TeraSort does, use 10000000000 rows with 10/10-byte keys and 78/78-byte values. Along with the 10-byte row id and \r\n, this gives
+ * 100 bytes per row * 10000000000 rows = 1 TB. The min/max ranges for the key and value parameters are both inclusive.
+ *
+ *
+ */
+public class TeraSortIngest extends Configured implements Tool {
+  /**
+   * An input format that assigns ranges of longs to each mapper.
+   */
+  static class RangeInputFormat extends InputFormat<LongWritable,NullWritable> {
+    /**
+     * An input split consisting of a range on numbers.
+     */
+    static class RangeInputSplit extends InputSplit implements Writable {
+      long firstRow;
+      long rowCount;
+
+      public RangeInputSplit() {}
+
+      public RangeInputSplit(long offset, long length) {
+        firstRow = offset;
+        rowCount = length;
+      }
+
+      @Override
+      public long getLength() throws IOException {
+        return 0;
+      }
+
+      @Override
+      public String[] getLocations() throws IOException {
+        return new String[] {};
+      }
+
+      @Override
+      public void readFields(DataInput in) throws IOException {
+        firstRow = WritableUtils.readVLong(in);
+        rowCount = WritableUtils.readVLong(in);
+      }
+
+      @Override
+      public void write(DataOutput out) throws IOException {
+        WritableUtils.writeVLong(out, firstRow);
+        WritableUtils.writeVLong(out, rowCount);
+      }
+    }
+
+    /**
+     * A record reader that will generate a range of numbers.
+     */
+    static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
+      long startRow;
+      long finishedRows;
+      long totalRows;
+
+      public RangeRecordReader(RangeInputSplit split) {
+        startRow = split.firstRow;
+        finishedRows = 0;
+        totalRows = split.rowCount;
+      }
+
+      @Override
+      public void close() throws IOException {}
+
+      @Override
+      public float getProgress() throws IOException {
+        return finishedRows / (float) totalRows;
+      }
+
+      @Override
+      public LongWritable getCurrentKey() throws IOException, InterruptedException {
+        return new LongWritable(startRow + finishedRows);
+      }
+
+      @Override
+      public NullWritable getCurrentValue() throws IOException, InterruptedException {
+        return NullWritable.get();
+      }
+
+      @Override
+      public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
+
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (finishedRows < totalRows) {
+          ++finishedRows;
+          return true;
+        }
+        return false;
+      }
+    }
+
+    @Override
+    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
+      // reporter.setStatus("Creating record reader");
+      return new RangeRecordReader((RangeInputSplit) split);
+    }
+
+    /**
+     * Create the desired number of splits, dividing the number of rows between the mappers.
+     */
+    @Override
+    public List<InputSplit> getSplits(JobContext job) {
+      long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
+      int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
+      long rowsPerSplit = totalRows / numSplits;
+      System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
+      ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
+      long currentRow = 0;
+      for (int split = 0; split < numSplits - 1; ++split) {
+        splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
+        currentRow += rowsPerSplit;
+      }
+      splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
+      System.out.println("Done Generating.");
+      return splits;
+    }
+
+  }
+
+  private static String NUMSPLITS = "terasort.overridesplits";
+  private static String NUMROWS = "terasort.numrows";
+
+  static class RandomGenerator {
+    private long seed = 0;
+    private static final long mask32 = (1l << 32) - 1;
+    /**
+     * The number of iterations separating the precomputed seeds.
+     */
+    private static final int seedSkip = 128 * 1024 * 1024;
+    /**
+     * The precomputed seed values after every seedSkip iterations. There should be enough values so that all 2**32 iterations are covered.
+     */
+    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
+        3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
+        1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
+
+    /**
+     * Start the random number generator on the given iteration.
+     *
+     * @param initalIteration
+     *          the iteration number to start on
+     */
+    RandomGenerator(long initalIteration) {
+      int baseIndex = (int) ((initalIteration & mask32) / seedSkip);
+      seed = seeds[baseIndex];
+      for (int i = 0; i < initalIteration % seedSkip; ++i) {
+        next();
+      }
+    }
+
+    RandomGenerator() {
+      this(0);
+    }
+
+    long next() {
+      seed = (seed * 3141592621l + 663896637) & mask32;
+      return seed;
+    }
+  }
+
+  /**
+   * The Mapper class that, given a row number, will generate the appropriate output line.
+   */
+  public static class SortGenMapper extends Mapper<LongWritable,NullWritable,Text,Mutation> {
+    private Text tableName = null;
+    private int minkeylength = 0;
+    private int maxkeylength = 0;
+    private int minvaluelength = 0;
+    private int maxvaluelength = 0;
+
+    private Text key = new Text();
+    private Text value = new Text();
+    private RandomGenerator rand;
+    private byte[] keyBytes; // = new byte[12];
+    private byte[] spaces = "          ".getBytes();
+    private byte[][] filler = new byte[26][];
+    {
+      for (int i = 0; i < 26; ++i) {
+        filler[i] = new byte[10];
+        for (int j = 0; j < 10; ++j) {
+          filler[i][j] = (byte) ('A' + i);
+        }
+      }
+    }
+
+    /**
+     * Add a random key to the text
+     */
+    private Random random = new Random();
+
+    private void addKey() {
+      int range = random.nextInt(maxkeylength - minkeylength + 1);
+      int keylen = range + minkeylength;
+      int keyceil = keylen + (4 - (keylen % 4));
+      keyBytes = new byte[keyceil];
+
+      long temp = 0;
+      for (int i = 0; i < keyceil / 4; i++) {
+        temp = rand.next() / 52;
+        keyBytes[3 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[2 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[1 + 4 * i] = (byte) (' ' + (temp % 95));
+        temp /= 95;
+        keyBytes[4 * i] = (byte) (' ' + (temp % 95));
+      }
+      key.set(keyBytes, 0, keylen);
+    }
+
+    /**
+     * Add the rowid to the row.
+     */
+    private Text getRowIdString(long rowId) {
+      Text paddedRowIdString = new Text();
+      byte[] rowid = Integer.toString((int) rowId).getBytes();
+      int padSpace = 10 - rowid.length;
+      if (padSpace > 0) {
+        paddedRowIdString.append(spaces, 0, 10 - rowid.length);
+      }
+      paddedRowIdString.append(rowid, 0, Math.min(rowid.length, 10));
+      return paddedRowIdString;
+    }
+
+    /**
+     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
+     *
+     * @param rowId
+     *          the current row number
+     */
+    private void addFiller(long rowId) {
+      int base = (int) ((rowId * 8) % 26);
+
+      // Get Random var
+      Random random = new Random(rand.seed);
+
+      int range = random.nextInt(maxvaluelength - minvaluelength + 1);
+      int valuelen = range + minvaluelength;
+
+      while (valuelen > 10) {
+        value.append(filler[(base + valuelen) % 26], 0, 10);
+        valuelen -= 10;
+      }
+
+      if (valuelen > 0)
+        value.append(filler[(base + valuelen) % 26], 0, valuelen);
+    }
+
+    @Override
+    public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
+      context.setStatus("Entering");
+      long rowId = row.get();
+      if (rand == null) {
+        // we use 3 random numbers per row
+        rand = new RandomGenerator(rowId * 3);
+      }
+      addKey();
+      value.clear();
+      // addRowId(rowId);
+      addFiller(rowId);
+
+      // New
+      Mutation m = new Mutation(key);
+      m.put(new Text("c"), // column family
+          getRowIdString(rowId), // column qual
+          new Value(value.toString().getBytes())); // data
+
+      context.setStatus("About to add to accumulo");
+      context.write(tableName, m);
+      context.setStatus("Added to accumulo " + key.toString());
+    }
+
+    @Override
+    public void setup(Context job) {
+      minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
+      maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
+      minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
+      maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
+      tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    ToolRunner.run(new Configuration(), new TeraSortIngest(), args);
+  }
+
+  static class Opts extends MapReduceClientOnRequiredTable {
+    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
+    long numRows;
+    @Parameter(names = {"-nk", "--minKeySize"}, description = "minimum key size", required = true)
+    int minKeyLength;
+    @Parameter(names = {"-xk", "--maxKeySize"}, description = "maximum key size", required = true)
+    int maxKeyLength;
+    @Parameter(names = {"-nv", "--minValueSize"}, description = "minimum value size", required = true)
+    int minValueLength;
+    @Parameter(names = {"-xv", "--maxValueSize"}, description = "maximum value size", required = true)
+    int maxValueLength;
+    @Parameter(names = "--splits", description = "number of splits to create in the table")
+    int splits = 0;
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    Job job = Job.getInstance(getConf());
+    job.setJobName("TeraSortCloud");
+    job.setJarByClass(this.getClass());
+    Opts opts = new Opts();
+    opts.parseArgs(TeraSortIngest.class.getName(), args);
+
+    job.setInputFormatClass(RangeInputFormat.class);
+    job.setMapperClass(SortGenMapper.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Mutation.class);
+
+    job.setNumReduceTasks(0);
+
+    job.setOutputFormatClass(AccumuloOutputFormat.class);
+    opts.setAccumuloConfigs(job);
+    BatchWriterConfig bwConfig = new BatchWriterConfig().setMaxMemory(10L * 1000 * 1000);
+    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
+
+    Configuration conf = job.getConfiguration();
+    conf.setLong(NUMROWS, opts.numRows);
+    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
+    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
+    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
+    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
+    conf.set("cloudgen.tablename", opts.getTableName());
+
+    if (args.length > 10)
+      conf.setInt(NUMSPLITS, opts.splits);
+
+    job.waitForCompletion(true);
+    return job.isSuccessful() ? 0 : 1;
+  }
+}
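
A side note on the RandomGenerator above: the seeds table holds 32 entries because 2**32 iterations divided by seedSkip (128 * 1024 * 1024) is 32. The standalone sketch below (not part of the patch; the iteration value is a made-up placeholder) shows how the constructor picks a base seed index and how many single next() steps remain:

    public class SeedSkipDemo {
        private static final long MASK32 = (1L << 32) - 1;
        private static final int SEED_SKIP = 128 * 1024 * 1024;

        public static void main(String[] args) {
            long initialIteration = 3_000_000_000L; // hypothetical rowId * 3 value
            // index into the 32-entry precomputed seed table
            int baseIndex = (int) ((initialIteration & MASK32) / SEED_SKIP);
            // iterations still advanced one next() call at a time
            long remaining = initialIteration % SEED_SKIP;
            System.out.println("seed index " + baseIndex + ", then advance " + remaining + " steps");
        }
    }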

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
index 5bb4ad6..0ba8bf2 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
@@ -64,8 +64,8 @@ import org.apache.accumulo.core.iterators.user.VersioningIterator;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.test.constraints.MaxMutationSize;
+import org.apache.accumulo.test.constraints.NumericValueConstraint;
 import org.apache.accumulo.harness.MiniClusterHarness;
 import org.apache.accumulo.harness.SharedMiniClusterBase;
 import org.apache.accumulo.harness.TestingKdc;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
index 8dc2990..301145d 100644
--- a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyNamespaceOperations.java
@@ -148,12 +148,11 @@ public class TestProxyNamespaceOperations {
 
   @Test
   public void namespaceConstraints() throws TException {
-    int constraintId = tpc.proxy().addNamespaceConstraint(userpass, testnamespace, "org.apache.accumulo.examples.simple.constraints.MaxMutationSize");
-    assertTrue(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
-    assertEquals(constraintId,
-        (int) tpc.proxy().listNamespaceConstraints(userpass, testnamespace).get("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
+    int constraintId = tpc.proxy().addNamespaceConstraint(userpass, testnamespace, "org.apache.accumulo.test.constraints.MaxMutationSize");
+    assertTrue(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.test.constraints.MaxMutationSize"));
+    assertEquals(constraintId, (int) tpc.proxy().listNamespaceConstraints(userpass, testnamespace).get("org.apache.accumulo.test.constraints.MaxMutationSize"));
     tpc.proxy().removeNamespaceConstraint(userpass, testnamespace, constraintId);
-    assertFalse(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.examples.simple.constraints.MaxMutationSize"));
+    assertFalse(tpc.proxy().listNamespaceConstraints(userpass, testnamespace).containsKey("org.apache.accumulo.test.constraints.MaxMutationSize"));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java b/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
new file mode 100644
index 0000000..7729743
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraintTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+
+public class AlphaNumKeyConstraintTest {
+
+  private AlphaNumKeyConstraint ankc = new AlphaNumKeyConstraint();
+
+  @Test
+  public void test() {
+    Mutation goodMutation = new Mutation(new Text("Row1"));
+    goodMutation.put(new Text("Colf2"), new Text("ColQ3"), new Value("value".getBytes()));
+    assertNull(ankc.check(null, goodMutation));
+
+    // Check that violations are in row, cf, cq order
+    Mutation badMutation = new Mutation(new Text("Row#1"));
+    badMutation.put(new Text("Colf$2"), new Text("Colq%3"), new Value("value".getBytes()));
+    assertEquals(ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
+        ankc.check(null, badMutation));
+  }
+
+  @Test
+  public void testGetViolationDescription() {
+    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
+    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
+    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
+    assertNull(ankc.getViolationDescription((short) 4));
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java b/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
new file mode 100644
index 0000000..f13fd28
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/constraints/NumericValueConstraintTest.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class NumericValueConstraintTest {
+
+  private NumericValueConstraint nvc = new NumericValueConstraint();
+
+  @Test
+  public void testCheck() {
+    Mutation goodMutation = new Mutation(new Text("r"));
+    goodMutation.put(new Text("cf"), new Text("cq"), new Value("1234".getBytes()));
+    assertNull(nvc.check(null, goodMutation));
+
+    // Check that multiple bad mutations result in one violation only
+    Mutation badMutation = new Mutation(new Text("r"));
+    badMutation.put(new Text("cf"), new Text("cq"), new Value("foo1234".getBytes()));
+    badMutation.put(new Text("cf2"), new Text("cq2"), new Value("foo1234".getBytes()));
+    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE, Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
+  }
+
+  @Test
+  public void testGetViolationDescription() {
+    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE, nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
+    assertNull(nvc.getViolationDescription((short) 2));
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/Benchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/Benchmark.py b/test/system/bench/lib/Benchmark.py
index d205e10..1481ccf 100755
--- a/test/system/bench/lib/Benchmark.py
+++ b/test/system/bench/lib/Benchmark.py
@@ -94,9 +94,9 @@ class Benchmark(unittest.TestCase):
         globjar = [ j for j in glob.glob(path) if j.find('javadoc') == -1 and j.find('sources') == -1 ]
         return globjar[0]
         
-    # Returns the location of the local examples jar
-    def getexamplejar(self):
-        return self.findjar(accumulo() + '/lib/accumulo-examples-simple.jar')
+    # Returns the location of the local test jar
+    def gettestjar(self):
+        return self.findjar(accumulo() + '/lib/accumulo-test.jar')
     
     # Returns a string of core, thrift and zookeeper jars with a specified delim
     def getjars(self, delim=','):

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/RowHashBenchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/RowHashBenchmark.py b/test/system/bench/lib/RowHashBenchmark.py
index 89b0fbb..34009d6 100755
--- a/test/system/bench/lib/RowHashBenchmark.py
+++ b/test/system/bench/lib/RowHashBenchmark.py
@@ -55,7 +55,7 @@ class RowHashBenchmark(Benchmark):
                     self.output_table) 
             self.sleep(15)
         code, out, err = cloudshell.run(self.username, self.password, "createtable %s -sf %s\n" % (self.output_table, file))
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.TeraSortIngest',
                                     '--count', self.numrows(),
                                     '-nk', self.keysizemin(),
                                     '-xk', self.keysizemax(),
@@ -102,7 +102,7 @@ class RowHashBenchmark(Benchmark):
         return self.valmax
         
     def runTest(self):   
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.RowHash',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.RowHash',
                                     self.getInstance(),
                                     self.getZookeepers(),
                                     self.getUsername(),

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/system/bench/lib/TeraSortBenchmark.py
----------------------------------------------------------------------
diff --git a/test/system/bench/lib/TeraSortBenchmark.py b/test/system/bench/lib/TeraSortBenchmark.py
index 0b1d9f4..f9984b2 100755
--- a/test/system/bench/lib/TeraSortBenchmark.py
+++ b/test/system/bench/lib/TeraSortBenchmark.py
@@ -72,7 +72,7 @@ class TeraSortBenchmark(Benchmark):
         dir = os.path.dirname(os.path.realpath(__file__))
         file = os.path.join( dir, 'splits' )
         code, out, err = cloudshell.run(self.username, self.password, "createtable %s -sf %s\n" % (self.tablename, file))
-        command = self.buildcommand('org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
+        command = self.buildcommand('org.apache.accumulo.test.mapreduce.TeraSortIngest',
                                     '--count', self.numrows(),
                                     '-nk', self.keysizemin(),
                                     '-xk', self.keysizemax(),


[7/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
ACCUMULO-4511 Removed Accumulo Examples

* Moved code and documentation to new accumulo-examples repo
* Updated references in user manual
* Updated README.md to reflect the change
* Example docs and source are no longer included in tarball


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8e0f19a1
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8e0f19a1
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8e0f19a1

Branch: refs/heads/master
Commit: 8e0f19a1c9b99cd30368a2cee64d898f896c9cbf
Parents: 13201a8
Author: Mike Walch <mw...@apache.org>
Authored: Wed Dec 7 10:49:34 2016 -0500
Committer: Mike Walch <mw...@apache.org>
Committed: Wed Dec 7 12:03:43 2016 -0500

----------------------------------------------------------------------
 README.md                                       |  30 +-
 .../conf/examples/generic_logger.properties     |   2 -
 assemble/pom.xml                                |   5 -
 assemble/src/main/assemblies/component.xml      |  20 -
 .../core/client/admin/TableOperations.java      |   2 +-
 .../iterators/user/IntersectingIterator.java    |   2 +-
 docs/src/main/asciidoc/chapters/analytics.txt   |   3 +-
 docs/src/main/asciidoc/chapters/clients.txt     |  11 +-
 .../asciidoc/chapters/high_speed_ingest.txt     |   8 +-
 docs/src/main/asciidoc/chapters/sampling.txt    |  10 +-
 .../asciidoc/chapters/table_configuration.txt   |  13 +-
 docs/src/main/resources/examples/batch.md       |  57 --
 docs/src/main/resources/examples/bloom.md       | 221 ------
 docs/src/main/resources/examples/bulkIngest.md  |  35 -
 docs/src/main/resources/examples/classpath.md   |  69 --
 docs/src/main/resources/examples/client.md      |  81 ---
 docs/src/main/resources/examples/combiner.md    |  72 --
 .../resources/examples/compactionStrategy.md    |  67 --
 docs/src/main/resources/examples/constraints.md |  56 --
 docs/src/main/resources/examples/dirlist.md     | 118 ----
 docs/src/main/resources/examples/export.md      |  93 ---
 docs/src/main/resources/examples/filedata.md    |  51 --
 docs/src/main/resources/examples/filter.md      | 112 ---
 docs/src/main/resources/examples/helloworld.md  |  49 --
 docs/src/main/resources/examples/index.md       | 100 ---
 docs/src/main/resources/examples/isolation.md   |  51 --
 docs/src/main/resources/examples/mapred.md      | 156 -----
 docs/src/main/resources/examples/maxmutation.md |  51 --
 docs/src/main/resources/examples/regex.md       |  59 --
 .../src/main/resources/examples/reservations.md |  68 --
 docs/src/main/resources/examples/rgbalancer.md  | 161 -----
 docs/src/main/resources/examples/rowhash.md     |  61 --
 docs/src/main/resources/examples/sample.md      | 193 ------
 docs/src/main/resources/examples/shard.md       |  68 --
 docs/src/main/resources/examples/tabletofile.md |  61 --
 docs/src/main/resources/examples/terasort.md    |  52 --
 docs/src/main/resources/examples/visibility.md  | 133 ----
 examples/simple/.gitignore                      |  28 -
 examples/simple/pom.xml                         | 104 ---
 .../simple/src/main/findbugs/exclude-filter.xml |  23 -
 .../client/CountingVerifyingReceiver.java       |  64 --
 .../accumulo/examples/simple/client/Flush.java  |  37 -
 .../simple/client/RandomBatchScanner.java       | 194 ------
 .../simple/client/RandomBatchWriter.java        | 178 -----
 .../simple/client/ReadWriteExample.java         | 151 -----
 .../examples/simple/client/RowOperations.java   | 215 ------
 .../simple/client/SequentialBatchWriter.java    |  68 --
 .../simple/client/TraceDumpExample.java         |  98 ---
 .../examples/simple/client/TracingExample.java  | 149 ----
 .../examples/simple/combiner/StatsCombiner.java | 111 ---
 .../constraints/AlphaNumKeyConstraint.java      | 100 ---
 .../simple/constraints/MaxMutationSize.java     |  45 --
 .../constraints/NumericValueConstraint.java     |  71 --
 .../examples/simple/dirlist/FileCount.java      | 290 --------
 .../examples/simple/dirlist/Ingest.java         | 173 -----
 .../examples/simple/dirlist/QueryUtil.java      | 279 --------
 .../examples/simple/dirlist/Viewer.java         | 217 ------
 .../simple/filedata/CharacterHistogram.java     | 107 ---
 .../examples/simple/filedata/ChunkCombiner.java | 184 -----
 .../simple/filedata/ChunkInputFormat.java       |  76 ---
 .../simple/filedata/ChunkInputStream.java       | 253 -------
 .../simple/filedata/FileDataIngest.java         | 202 ------
 .../examples/simple/filedata/FileDataQuery.java |  86 ---
 .../examples/simple/filedata/KeyUtil.java       |  65 --
 .../simple/filedata/VisibilityCombiner.java     | 107 ---
 .../helloworld/InsertWithBatchWriter.java       |  65 --
 .../examples/simple/helloworld/ReadData.java    |  78 ---
 .../simple/isolation/InterferenceTest.java      | 181 -----
 .../examples/simple/mapreduce/NGramIngest.java  | 113 ----
 .../examples/simple/mapreduce/RegexExample.java |  98 ---
 .../examples/simple/mapreduce/RowHash.java      |  95 ---
 .../examples/simple/mapreduce/TableToFile.java  | 112 ---
 .../simple/mapreduce/TeraSortIngest.java        | 399 -----------
 .../simple/mapreduce/TokenFileWordCount.java    | 104 ---
 .../simple/mapreduce/UniqueColumns.java         | 143 ----
 .../examples/simple/mapreduce/WordCount.java    |  99 ---
 .../mapreduce/bulk/BulkIngestExample.java       | 167 -----
 .../simple/mapreduce/bulk/GenerateTestData.java |  53 --
 .../simple/mapreduce/bulk/SetupTable.java       |  50 --
 .../simple/mapreduce/bulk/VerifyIngest.java     |  91 ---
 .../examples/simple/reservations/ARS.java       | 303 ---------
 .../examples/simple/sample/SampleExample.java   | 150 -----
 .../examples/simple/shard/ContinuousQuery.java  | 138 ----
 .../shard/CutoffIntersectingIterator.java       | 123 ----
 .../accumulo/examples/simple/shard/Index.java   | 118 ----
 .../accumulo/examples/simple/shard/Query.java   | 104 ---
 .../accumulo/examples/simple/shard/Reverse.java |  72 --
 .../examples/simple/shell/DebugCommand.java     |  46 --
 .../simple/shell/ExampleShellExtension.java     |  37 -
 .../constraints/AlphaNumKeyConstraintTest.java  |  53 --
 .../constraints/NumericValueConstraintTest.java |  51 --
 .../simple/filedata/ChunkCombinerTest.java      | 258 -------
 .../simple/filedata/ChunkInputStreamTest.java   | 395 -----------
 .../examples/simple/filedata/KeyUtilTest.java   |  44 --
 .../simple/src/test/resources/log4j.properties  |  24 -
 pom.xml                                         |   6 -
 proxy/examples/python/TestNamespace.py          |   6 +-
 proxy/pom.xml                                   |   5 -
 test/pom.xml                                    |   4 -
 .../accumulo/test/ConditionalWriterIT.java      |   2 +-
 .../org/apache/accumulo/test/NamespacesIT.java  |   2 +-
 .../org/apache/accumulo/test/ShellServerIT.java |   2 +-
 .../test/constraints/AlphaNumKeyConstraint.java |  96 +++
 .../test/constraints/MaxMutationSize.java       |  45 ++
 .../constraints/NumericValueConstraint.java     |  71 ++
 .../test/examples/simple/dirlist/CountIT.java   | 101 ---
 .../simple/filedata/ChunkInputFormatIT.java     | 320 ---------
 .../simple/filedata/ChunkInputStreamIT.java     | 174 -----
 .../accumulo/test/functional/ConstraintIT.java  |   8 +-
 .../accumulo/test/functional/ExamplesIT.java    | 673 -------------------
 .../accumulo/test/functional/MapReduceIT.java   |   2 +-
 .../apache/accumulo/test/mapreduce/RowHash.java |  95 +++
 .../accumulo/test/mapreduce/TeraSortIngest.java | 399 +++++++++++
 .../accumulo/test/proxy/SimpleProxyBase.java    |   4 +-
 .../proxy/TestProxyNamespaceOperations.java     |   9 +-
 .../constraints/AlphaNumKeyConstraintTest.java  |  53 ++
 .../constraints/NumericValueConstraintTest.java |  51 ++
 test/system/bench/lib/Benchmark.py              |   6 +-
 test/system/bench/lib/RowHashBenchmark.py       |   4 +-
 test/system/bench/lib/TeraSortBenchmark.py      |   2 +-
 120 files changed, 871 insertions(+), 11109 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index d3b8b5d..dcd5632 100644
--- a/README.md
+++ b/README.md
@@ -35,22 +35,15 @@ Follow [these instructions][install] to install and run an Accumulo binary distr
 Documentation
 -------------
 
-Accumulo has the following documentation which is viewable on the [Accumulo website][accumulo]
-using the links below:
+Apache Accumulo has the following documentation:
 
-* [User Manual][man-web] - In-depth developer and administrator documentation.
-* [Accumulo Examples][ex-web] - Code with corresponding README files that give step by step
-instructions for running the example.
+* [User Manual][manual] - In-depth developer and administrator documentation.
+  - Available in binary tarball at `docs/accumulo_user_manual.html`
+  - Available in source tarball at `docs/src/main/asciidoc`
+* [Accumulo Examples][examples] - Documented example code for using Accumulo
+* [Javadocs][javadocs] - Accumulo API documentation
 
-This documentation can also be found in Accumulo distributions:
-
-* **Binary distribution**
-  - User manual is located at `docs/accumulo_user_manual.html`.
-  - Accumulo Examples: READMEs and source are in `docs/examples`. The distribution also has a jar with
-    the compiled examples. This makes it easy to run them after following the [install] instructions.
-
-* **Source distribution** - The [Example Source][ex-src], [Example Readmes][rm-src], and
-[User Manual Source][man-src] are available.
+More documentation can be found on the [project website][accumulo].
 
 Building
 --------
@@ -126,7 +119,6 @@ the bouncycastle library for some crypographic technology as well. See
 [the BouncyCastle FAQ][bouncy-faq] for
 more details on bouncycastle's cryptography features.
 
-
 [accumulo]: https://accumulo.apache.org
 [logo]: contrib/accumulo-logo.png
 [install]: INSTALL.md
@@ -137,13 +129,11 @@ more details on bouncycastle's cryptography features.
 [Thrift]: https://thrift.apache.org
 [features]: https://accumulo.apache.org/notable_features
 [Maven]: https://maven.apache.org
-[man-web]: https://accumulo.apache.org/latest/accumulo_user_manual
-[ex-web]: https://accumulo.apache.org/latest/examples
+[manual]: https://accumulo.apache.org/latest/accumulo_user_manual
+[examples]: https://github.com/apache/accumulo-examples
+[javadocs]: https://accumulo.apache.org/latest/apidocs
 [semver]: http://semver.org/spec/v2.0.0
 [regex]: http://checkstyle.sourceforge.net/config_regexp.html
-[ex-src]: examples/simple/src/main/java/org/apache/accumulo/examples/simple
-[rm-src]: docs/src/main/resources/examples
-[man-src]: docs/src/main/asciidoc
 [li]: https://img.shields.io/badge/license-ASL-blue.svg
 [ll]: https://www.apache.org/licenses/LICENSE-2.0
 [mi]: https://maven-badges.herokuapp.com/maven-central/org.apache.accumulo/accumulo-core/badge.svg

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/assemble/conf/examples/generic_logger.properties
----------------------------------------------------------------------
diff --git a/assemble/conf/examples/generic_logger.properties b/assemble/conf/examples/generic_logger.properties
index dbe12cf..ff589d9 100644
--- a/assemble/conf/examples/generic_logger.properties
+++ b/assemble/conf/examples/generic_logger.properties
@@ -48,8 +48,6 @@ log4j.logger.org.apache.accumulo.server.security.Auditor=WARN
 
 log4j.logger.org.apache.accumulo.core.file.rfile.bcfile=INFO
 
-log4j.logger.org.apache.accumulo.examples.wikisearch=INFO
-
 log4j.logger.org.mortbay.log=WARN
 
 log4j.logger.com.yahoo.zookeeper=ERROR

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 1f2a899..e35a3bc 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -75,11 +75,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-examples-simple</artifactId>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-fate</artifactId>
       <optional>true</optional>
     </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/assemble/src/main/assemblies/component.xml
----------------------------------------------------------------------
diff --git a/assemble/src/main/assemblies/component.xml b/assemble/src/main/assemblies/component.xml
index 83db9e8..1f45fe1 100644
--- a/assemble/src/main/assemblies/component.xml
+++ b/assemble/src/main/assemblies/component.xml
@@ -99,15 +99,6 @@
       </includes>  
     </fileSet>
     <fileSet>
-      <directory>../examples/simple/src/main/java/org/apache/accumulo/examples/simple/</directory>
-      <outputDirectory>docs/examples/src</outputDirectory>
-      <directoryMode>0755</directoryMode>
-      <fileMode>0644</fileMode>
-      <includes>
-        <include>*/**</include>
-      </includes>
-    </fileSet>
-    <fileSet>
       <directory>./</directory>
       <outputDirectory>lib/ext</outputDirectory>
       <directoryMode>0755</directoryMode>
@@ -124,17 +115,6 @@
       </excludes>
     </fileSet>
     <fileSet>
-      <directory>../docs/src/main/resources</directory>
-      <outputDirectory>docs</outputDirectory>
-      <directoryMode>0755</directoryMode>
-      <fileMode>0644</fileMode>
-      <includes>
-        <include>*.html</include>
-        <include>*.css</include>
-        <include>examples/*</include>
-      </includes>
-    </fileSet>
-    <fileSet>
       <directory>conf</directory>
       <directoryMode>0755</directoryMode>
       <fileMode>0755</fileMode>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
index 3e56736..cabcfa3 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperations.java
@@ -143,7 +143,7 @@ public interface TableOperations {
    * offline for the duration of distcp. To avoid losing access to a table it can be cloned and the clone taken offline for export.
    *
    * <p>
-   * See docs/examples/README.export
+   * See https://github.com/apache/accumulo-examples/blob/master/docs/export.md
    *
    * @param tableName
    *          Name of the table to export.
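
As a rough illustration of the clone-then-export workflow that javadoc describes (not part of the patch; the connector, table names, and HDFS path are placeholders):

    import java.util.Collections;
    import org.apache.accumulo.core.client.Connector;

    public class ExportClonedTable {
        public static void export(Connector conn) throws Exception {
            // clone, then take the clone offline so its files cannot be deleted while distcp runs
            conn.tableOperations().clone("mytable", "mytable_export", true,
                Collections.<String,String> emptyMap(), Collections.<String> emptySet());
            conn.tableOperations().offline("mytable_export", true);
            conn.tableOperations().exportTable("mytable_export", "/exports/mytable_export");
        }
    }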

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
index 94995cb..205fbbb 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/IntersectingIterator.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.io.Text;
  * This iterator will *ignore* any columnFamilies passed to {@link #seek(Range, Collection, boolean)} as it performs intersections over terms. Extending classes
  * should override the {@link TermSource#seekColfams} in their implementation's {@link #init(SortedKeyValueIterator, Map, IteratorEnvironment)} method.
  *
- * README.shard in docs/examples shows an example of using the IntersectingIterator.
+ * An example of using the IntersectingIterator is available at https://github.com/apache/accumulo-examples/blob/master/docs/shard.md
  */
 public class IntersectingIterator implements SortedKeyValueIterator<Key,Value> {
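
A minimal sketch of wiring the IntersectingIterator onto a BatchScanner (illustration only, not from this commit; the table name and terms are placeholders following the linked shard example):

    import java.util.Collections;
    import java.util.Map.Entry;
    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.IntersectingIterator;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class ShardQuery {
        public static void query(Connector conn) throws Exception {
            BatchScanner bs = conn.createBatchScanner("shardTable", Authorizations.EMPTY, 10);
            IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
            // terms to intersect; the iterator ignores column families passed to seek()
            IntersectingIterator.setColumnFamilies(ii, new Text[] {new Text("apple"), new Text("banana")});
            bs.addScanIterator(ii);
            bs.setRanges(Collections.singleton(new Range()));
            for (Entry<Key,Value> entry : bs) {
                System.out.println(entry.getKey().getColumnQualifier()); // document id in the shard schema
            }
            bs.close();
        }
    }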
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/asciidoc/chapters/analytics.txt
----------------------------------------------------------------------
diff --git a/docs/src/main/asciidoc/chapters/analytics.txt b/docs/src/main/asciidoc/chapters/analytics.txt
index 3954788..3b428bb 100644
--- a/docs/src/main/asciidoc/chapters/analytics.txt
+++ b/docs/src/main/asciidoc/chapters/analytics.txt
@@ -185,7 +185,8 @@ AccumuloOutputFormat.setZooKeeperInstance(job, "myinstance",
 AccumuloOutputFormat.setMaxLatency(job, 300000); // milliseconds
 AccumuloOutputFormat.setMaxMutationBufferSize(job, 50000000); // bytes
 
-An example of using MapReduce with Accumulo can be found at +docs/examples/mapred.md+.
+The https://github.com/apache/accumulo-examples/blob/master/docs/mapred.md[MapReduce example]
+contains a complete example of using MapReduce with Accumulo.
 
 === Combiners
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/asciidoc/chapters/clients.txt
----------------------------------------------------------------------
diff --git a/docs/src/main/asciidoc/chapters/clients.txt b/docs/src/main/asciidoc/chapters/clients.txt
index 713abad..8571eae 100644
--- a/docs/src/main/asciidoc/chapters/clients.txt
+++ b/docs/src/main/asciidoc/chapters/clients.txt
@@ -102,6 +102,7 @@ mutation.put(colFam, colQual, colVis, timestamp, value);
 ----
 
 ==== BatchWriter
+
 The BatchWriter is highly optimized to send Mutations to multiple TabletServers
 and automatically batches Mutations destined for the same TabletServer to
 amortize network overhead. Care must be taken to avoid changing the contents of
@@ -123,7 +124,7 @@ writer.addMutation(mutation);
 writer.close();
 ----
 
-An example of using the batch writer can be found at +docs/examples/batch.md+.
+For more example code, see the https://github.com/apache/accumulo-examples/blob/master/docs/batch.md[batch writing and scanning example].
 
 ==== ConditionalWriter
 
@@ -148,7 +149,8 @@ and possibly sending another conditional mutation.  If this is not sufficient,
 then a higher level of abstraction can be built by storing transactional
 information within a row.
 
-An example of using the conditional writer can be found at +docs/examples/reservations.md+.
+See the https://github.com/apache/accumulo-examples/blob/master/docs/reservations.md[reservations example]
+for example code that uses the conditional writer.
 
 ==== Durability
 
@@ -232,7 +234,8 @@ crash a tablet server. By default rows are buffered in memory, but the user
 can easily supply their own buffer if they wish to buffer to disk when rows are
 large.
 
-For an example, see +docs/examples/src/isolation/InterferenceTest.java+
+See the https://github.com/apache/accumulo-examples/blob/master/docs/isolation.md[isolation example]
+for example code that uses the IsolatedScanner.
 
 ==== BatchScanner
 
@@ -261,7 +264,7 @@ for(Entry<Key,Value> entry : bscan) {
 }
 ----
 
-An example of the BatchScanner can be found at +docs/examples/batch.md+.
+For more example code, see the https://github.com/apache/accumulo-examples/blob/master/docs/batch.md[batch writing and scanning example].
 
 At this time, there is no client side isolation support for the BatchScanner.
 You may consider using the WholeRowIterator with the BatchScanner to achieve
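
Relating to the ConditionalWriter discussion in the hunk above, here is a minimal sketch of the read-modify-write pattern it describes (not part of the patch; the table, family, qualifier, and values are placeholders):

    import org.apache.accumulo.core.client.ConditionalWriter;
    import org.apache.accumulo.core.client.ConditionalWriter.Status;
    import org.apache.accumulo.core.client.ConditionalWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Condition;
    import org.apache.accumulo.core.data.ConditionalMutation;

    public class ConditionalUpdate {
        public static boolean bumpSequence(Connector conn) throws Exception {
            ConditionalWriter cw = conn.createConditionalWriter("table1", new ConditionalWriterConfig());
            // only apply the write if meta:seq still holds the previously read value "5"
            ConditionalMutation cm = new ConditionalMutation("row1", new Condition("meta", "seq").setValue("5"));
            cm.put("meta", "seq", "6");
            boolean accepted = cw.write(cm).getStatus() == Status.ACCEPTED;
            cw.close();
            return accepted;
        }
    }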

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/asciidoc/chapters/high_speed_ingest.txt
----------------------------------------------------------------------
diff --git a/docs/src/main/asciidoc/chapters/high_speed_ingest.txt b/docs/src/main/asciidoc/chapters/high_speed_ingest.txt
index 1e1be48..2a7a702 100644
--- a/docs/src/main/asciidoc/chapters/high_speed_ingest.txt
+++ b/docs/src/main/asciidoc/chapters/high_speed_ingest.txt
@@ -92,7 +92,8 @@ Note that the paths referenced are directories within the same HDFS instance ove
 which Accumulo is running. Accumulo places any files that failed to be added to the
 second directory specified.
 
-A complete example of using Bulk Ingest can be found at +docs/examples/bulkIngest.md+.
+See the https://github.com/apache/accumulo-examples/blob/master/docs/bulkIngest.md[Bulk Ingest example]
+for a complete example.
 
 === Logical Time for Bulk Ingest
 
@@ -119,6 +120,5 @@ import file.
 It is possible to efficiently write many mutations to Accumulo in parallel via a
 MapReduce job. In this scenario the MapReduce is written to process data that lives
 in HDFS and write mutations to Accumulo using the AccumuloOutputFormat. See
-the MapReduce section under Analytics for details.
-
-An example of using MapReduce can be found at +docs/examples/mapred.md+.
+the MapReduce section under Analytics for details. The https://github.com/apache/accumulo-examples/blob/master/docs/mapred.md[MapReduce example]
+is also a good reference for example code.
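
To make the bulk import step above concrete, a minimal sketch (illustration only; the table name and directories are placeholders and must live in the same HDFS instance Accumulo uses):

    import org.apache.accumulo.core.client.Connector;

    public class BulkImportSketch {
        public static void load(Connector conn) throws Exception {
            // files that fail to load are moved into the failures directory;
            // setTime=false keeps the timestamps already present in the ingested files
            conn.tableOperations().importDirectory("test_bulk", "/tmp/bulk/files", "/tmp/bulk/failures", false);
        }
    }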

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/asciidoc/chapters/sampling.txt
----------------------------------------------------------------------
diff --git a/docs/src/main/asciidoc/chapters/sampling.txt b/docs/src/main/asciidoc/chapters/sampling.txt
index 99c3c7b..237f43c 100644
--- a/docs/src/main/asciidoc/chapters/sampling.txt
+++ b/docs/src/main/asciidoc/chapters/sampling.txt
@@ -40,7 +40,7 @@ that class.  For guidance on implementing a Sampler see that interface's
 javadoc.  Accumulo provides a few implementations out of the box.   For
 information on how to use the samplers that ship with Accumulo look in the
 package `org.apache.accumulo.core.sample` and consult the javadoc of the
-classes there.  See +docs/examples/sample.md+ and +docs/examples/src/sample/SampleExample.java+
+classes there.  See the https://github.com/apache/accumulo-examples/blob/master/docs/sample.md[sampling example]
 for examples of how to configure a Sampler on a table.
 
 Once a table is configured with a sampler all writes after that point will
@@ -60,10 +60,10 @@ Inorder to scan sample data, use the +setSamplerConfiguration(...)+  method on
 +Scanner+ or +BatchScanner+.  Please consult this methods javadocs for more
 information.
 
-Sample data can also be scanned from within an Accumulo
-+SortedKeyValueIterator+.  To see how to do this, look at the example iterator
-referenced in +docs/examples/sample.md+.  Also, consult the javadoc on
-+org.apache.accumulo.core.iterators.IteratorEnvironment.cloneWithSamplingEnabled()+.
+Sample data can also be scanned from within an Accumulo +SortedKeyValueIterator+.
+To see how to do this, look at the example iterator referenced in the
+https://github.com/apache/accumulo-examples/blob/master/docs/sample.md[sampling example].
+Also, consult the javadoc on +org.apache.accumulo.core.iterators.IteratorEnvironment.cloneWithSamplingEnabled()+.
 
 Map reduce jobs using the +AccumuloInputFormat+ can also read sample data.  See
 the javadoc for the +setSamplerConfiguration()+ method on
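
As a rough sketch of configuring a sampler and then scanning only the sample data, per the text above (not from this commit; the table name, sampler options, and modulus are placeholders):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.sample.RowSampler;
    import org.apache.accumulo.core.client.sample.SamplerConfiguration;
    import org.apache.accumulo.core.security.Authorizations;

    public class SamplingSketch {
        public static void configureAndScan(Connector conn) throws Exception {
            SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName());
            sc.addOption("hasher", "murmur3_32");
            sc.addOption("modulus", "1009"); // roughly 1 in 1009 rows lands in the sample
            conn.tableOperations().setSamplerConfiguration("sampletest", sc);

            Scanner scanner = conn.createScanner("sampletest", Authorizations.EMPTY);
            scanner.setSamplerConfiguration(sc); // scan only the sample data
        }
    }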

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/asciidoc/chapters/table_configuration.txt
----------------------------------------------------------------------
diff --git a/docs/src/main/asciidoc/chapters/table_configuration.txt b/docs/src/main/asciidoc/chapters/table_configuration.txt
index adffd2e..e78d7bd 100644
--- a/docs/src/main/asciidoc/chapters/table_configuration.txt
+++ b/docs/src/main/asciidoc/chapters/table_configuration.txt
@@ -103,8 +103,8 @@ new constraint and place it in the lib directory of the Accumulo installation. N
 constraint jars can be added to Accumulo and enabled without restarting but any
 change to an existing constraint class requires Accumulo to be restarted.
 
-An example of constraints can be found in +docs/examples/constraints.md+ with
-corresponding code in +docs/examples/src/constraints+ .
+See the https://github.com/apache/accumulo-examples/blob/master/docs/constraints.md[constraints example]
+for example code.
 
 === Bloom Filters
 
@@ -117,7 +117,8 @@ To enable bloom filters, enter the following command in the Shell:
 
   user@myinstance> config -t mytable -s table.bloom.enabled=true
 
-An extensive example of using Bloom Filters can be found at +docs/examples/bloom.md+ .
+The https://github.com/apache/accumulo-examples/blob/master/docs/bloom.md[bloom filter example]
+contains an extensive example of using Bloom Filters.
 
 === Iterators
 
@@ -351,7 +352,8 @@ Additional Combiners can be added by creating a Java class that extends
 +org.apache.accumulo.core.iterators.Combiner+ and adding a jar containing that
 class to Accumulo's lib/ext directory.
 
-An example of a Combiner can be found at +docs/examples/combiner.md+.
+See the https://github.com/apache/accumulo-examples/blob/master/docs/combiner.md[combiner example]
+for example code.
 
 === Block Cache
 
@@ -664,4 +666,5 @@ splits, and logical time. Tables are exported and then copied via the hadoop
 distcp command. To export a table, it must be offline and stay offline while
 discp runs. The reason it needs to stay offline is to prevent files from being
 deleted. A table can be cloned and the clone taken offline inorder to avoid
-losing access to the table. See +docs/examples/export.md+ for an example.
+losing access to the table. See the https://github.com/apache/accumulo-examples/blob/master/docs/export.md[export example]
+for example code.
\ No newline at end of file
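
Following the combiner hunk above, a minimal sketch of the extension point it describes, a custom class extending org.apache.accumulo.core.iterators.Combiner (illustration only, not part of the patch; the ASCII-long value encoding is an assumption):

    import java.util.Iterator;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.Combiner;

    public class ExampleSummingCombiner extends Combiner {
        @Override
        public Value reduce(Key key, Iterator<Value> iter) {
            long sum = 0;
            while (iter.hasNext()) {
                // assumes values are ASCII-encoded longs, as in a simple counting schema
                sum += Long.parseLong(new String(iter.next().get()));
            }
            return new Value(Long.toString(sum).getBytes());
        }
    }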

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/batch.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/batch.md b/docs/src/main/resources/examples/batch.md
deleted file mode 100644
index d3ff5cf..0000000
--- a/docs/src/main/resources/examples/batch.md
+++ /dev/null
@@ -1,57 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Batch Writing and Scanning Example
----
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * SequentialBatchWriter.java - writes mutations with sequential rows and random values
- * RandomBatchWriter.java - used by SequentialBatchWriter to generate random values
- * RandomBatchScanner.java - reads random rows and verifies their values
-
-This is an example of how to use the batch writer and batch scanner. To compile
-the example, run maven and copy the produced jar into the accumulo lib dir.
-This is already done in the tar distribution.
-
-Below are commands that add 10000 entries to accumulo and then do 100 random
-queries. The write command generates random 50 byte values.
-
-Be sure to use the name of your instance (given as instance here) and the appropriate
-list of zookeeper nodes (given as zookeepers here).
-
-Before you run this, you must ensure that the user you are running has the
-"exampleVis" authorization. (you can set this in the shell with "setauths -u username -s exampleVis")
-
-    $ ./bin/accumulo shell -u root -e "setauths -u username -s exampleVis"
-
-You must also create the table, batchtest1, ahead of time. (In the shell, use "createtable batchtest1")
-
-    $ ./bin/accumulo shell -u username -e "createtable batchtest1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance -z zookeepers -u username -p password -t batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -i instance -z zookeepers -u username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --auths exampleVis
-    07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 lookups/sec   0.14 secs
-
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : num results : 100
-
-    07 11:33:11,364 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,370 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : 2173.91 lookups/sec   0.05 secs
-
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : num results : 100

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/bloom.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/bloom.md b/docs/src/main/resources/examples/bloom.md
deleted file mode 100644
index 7aa8e86..0000000
--- a/docs/src/main/resources/examples/bloom.md
+++ /dev/null
@@ -1,221 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Bloom Filter Example
----
-
-This example shows how to create a table with bloom filters enabled.  It also
-shows how bloom filters increase query performance when looking for values that
-do not exist in a table.
-
-Below table named bloom_test is created and bloom filters are enabled.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test
-    username@instance bloom_test> config -t bloom_test -s table.bloom.enabled=true
-    username@instance bloom_test> exit
-
-Below 1 million random values are inserted into accumulo. The randomly
-generated rows range between 0 and 1 billion. The random number generator is
-initialized with the seed 7.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
-
-Below the table is flushed:
-
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test -w'
-    05 10:40:06,069 [shell.Shell] INFO : Flush of table bloom_test completed.
-
-After the flush completes, 500 random queries are done against the table. The
-same seed is used to generate the queries, therefore everything is found in the
-table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    96.19 lookups/sec   5.20 secs
-    num results : 500
-    Generating 500 random queries...finished
-    102.35 lookups/sec   4.89 secs
-    num results : 500
-
-Below another 500 queries are performed, using a different seed which results
-in nothing being found. In this case the lookups are much faster because of
-the bloom filters.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 8 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 -batchThreads 20 -auths exampleVis
-    Generating 500 random queries...finished
-    2212.39 lookups/sec   0.23 secs
-    num results : 0
-    Did not find 500 rows
-    Generating 500 random queries...finished
-    4464.29 lookups/sec   0.11 secs
-    num results : 0
-    Did not find 500 rows
-
-********************************************************************************
-
-Bloom filters can also speed up lookups for entries that exist. In accumulo
-data is divided into tablets and each tablet has multiple map files. Every
-lookup in accumulo goes to a specific tablet where a lookup is done on each
-map file in the tablet. So if a tablet has three map files, lookup performance
-can be three times slower than a tablet with one map file. However if the map
-files contain unique sets of data, then bloom filters can help eliminate map
-files that do not contain the row being looked up. To illustrate this two
-identical tables were created using the following process. One table had bloom
-filters, the other did not. Also the major compaction ratio was increased to
-prevent the files from being compacted into one file.
-
- * Insert 1 million entries using  RandomBatchWriter with a seed of 7
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 8
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 9
- * Flush the table using the shell
-
-After following the above steps, each table will have a tablet with three map
-files. Flushing the table after each batch of inserts will create a map file.
-Each map file will contain 1 million entries generated with a different seed.
-This is assuming that Accumulo is configured with enough memory to hold 1
-million inserts. If not, then more map files will be created.
-
-The commands for creating the first table without bloom filters are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test1
-    username@instance bloom_test1> config -t bloom_test1 -s table.compaction.major.ratio=7
-    username@instance bloom_test1> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-
-The commands for creating the second table with bloom filers are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test2
-    username@instance bloom_test2> config -t bloom_test2 -s table.compaction.major.ratio=7
-    username@instance bloom_test2> config -t bloom_test2 -s table.bloom.enabled=true
-    username@instance bloom_test2> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-
-Below 500 lookups are done against the table without bloom filters using random
-NG seed 7. Even though only one map file will likely contain entries for this
-seed, all map files will be interrogated.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    35.09 lookups/sec  14.25 secs
-    num results : 500
-    Generating 500 random queries...finished
-    35.33 lookups/sec  14.15 secs
-    num results : 500
-
-Below the same lookups are done against the table with bloom filters. The
-lookups were 2.86 times faster because only one map file was used, even though three
-map files existed.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 -scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    99.03 lookups/sec   5.05 secs
-    num results : 500
-    Generating 500 random queries...finished
-    101.15 lookups/sec   4.94 secs
-    num results : 500
-
-You can verify the table has three files by looking in HDFS. To look in HDFS
-you will need the table ID, because this is used in HDFS instead of the table
-name. The following command will show table ids.
-
-    $ ./bin/accumulo shell -u username -p password -e 'tables -l'
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    bloom_test1          =>        o7
-    bloom_test2          =>        o8
-    trace                =>         1
-
-So the table id for bloom_test2 is o8. The command below shows what files this
-table has in HDFS. This assumes Accumulo is at the default location in HDFS.
-
-    $ hadoop fs -lsr /accumulo/tables/o8
-    drwxr-xr-x   - username supergroup          0 2012-01-10 14:02 /accumulo/tables/o8/default_tablet
-    -rw-r--r--   3 username supergroup   52672650 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dj.rf
-    -rw-r--r--   3 username supergroup   52436176 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dk.rf
-    -rw-r--r--   3 username supergroup   52850173 2012-01-10 14:02 /accumulo/tables/o8/default_tablet/F00000dl.rf
-
-Running the rfile-info command shows that one of the files has a bloom filter
-and that it is 1.5MB.
-
-    $ ./bin/accumulo rfile-info /accumulo/tables/o8/default_tablet/F00000dj.rf
-    Locality group         : <DEFAULT>
-	Start block          : 0
-	Num   blocks         : 752
-	Index level 0        : 43,598 bytes  1 blocks
-	First key            : row_0000001169 foo:1 [exampleVis] 1326222052539 false
-	Last key             : row_0999999421 foo:1 [exampleVis] 1326222052058 false
-	Num entries          : 999,536
-	Column families      : [foo]
-
-    Meta block     : BCFile.index
-      Raw size             : 4 bytes
-      Compressed size      : 12 bytes
-      Compression type     : gz
-
-    Meta block     : RFile.index
-      Raw size             : 43,696 bytes
-      Compressed size      : 15,592 bytes
-      Compression type     : gz
-
-    Meta block     : acu_bloom
-      Raw size             : 1,540,292 bytes
-      Compressed size      : 1,433,115 bytes
-      Compression type     : gz
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/bulkIngest.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/bulkIngest.md b/docs/src/main/resources/examples/bulkIngest.md
deleted file mode 100644
index 468c903..0000000
--- a/docs/src/main/resources/examples/bulkIngest.md
+++ /dev/null
@@ -1,35 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Bulk Ingest Example
----
-
-This is an example of how to bulk ingest data into Accumulo using MapReduce.
-
-The following commands show how to run this example. This example creates a
-table called test_bulk which has two initial split points. Then 1000 rows of
-test data are created in HDFS. After that, the 1000 rows are ingested into
-Accumulo. Finally, we verify the 1000 rows are in Accumulo.
-
-    $ PKG=org.apache.accumulo.examples.simple.mapreduce.bulk
-    $ ARGS="-i instance -z zookeepers -u username -p password"
-    $ ./bin/accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
-    $ ./bin/accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
-    $ ./bin/accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
-
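-Under the hood, the final step of this example boils down to a single client
-call that hands the directory of RFiles produced by the MapReduce job to
-Accumulo. The sketch below is illustrative only, not the exact
-BulkIngestExample source; the table and directory names are placeholders.
-
-    import org.apache.accumulo.core.client.Connector;
-
-    public class BulkImportSketch {
-      // Hands a directory of pre-built RFiles to Accumulo. Files that cannot be
-      // imported are moved to the failure directory.
-      static void bulkImport(Connector conn, String table, String srcDir, String failDir) throws Exception {
-        // setTime=false keeps the timestamps already written into the RFiles
-        conn.tableOperations().importDirectory(table, srcDir, failDir, false);
-      }
-    }
-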
-For a high level discussion of bulk ingest, see the docs dir.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/classpath.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/classpath.md b/docs/src/main/resources/examples/classpath.md
deleted file mode 100644
index 7ed7381..0000000
--- a/docs/src/main/resources/examples/classpath.md
+++ /dev/null
@@ -1,69 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Classpath Example
----
-
-This example shows how to use per-table classpaths. The example leverages a
-test jar which contains a Filter that suppresses rows containing "foo". The
-example shows copying the FooFilter.jar into HDFS and then configuring an
-Accumulo table to reference that jar.
-
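-For reference, a row-suppressing filter like the one in the test jar can be
-written by extending Accumulo's Filter iterator. The sketch below is
-illustrative only, not the exact FooFilter shipped in the test jar; it rejects
-any key whose row contains "foo".
-
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.Filter;
-
-    public class FooFilterSketch extends Filter {
-      @Override
-      public boolean accept(Key k, Value v) {
-        // keep every key/value pair whose row does not contain "foo"
-        return !k.getRow().toString().toLowerCase().contains("foo");
-      }
-    }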
-
-Execute the following command in the shell.
-
-    $ hadoop fs -copyFromLocal /path/to/accumulo/test/src/test/resources/FooFilter.jar /user1/lib
-
-Execute the following in the Accumulo shell to set up the classpath context
-
-    root@test15> config -s general.vfs.context.classpath.cx1=hdfs://<namenode host>:<namenode port>/user1/lib/[^.].*.jar
-
-Create a table
-
-    root@test15> createtable nofoo
-
-The following command makes this table use the configured classpath context
-
-    root@test15 nofoo> config -t nofoo -s table.classpath.context=cx1
-
-The following command configures an iterator that's in FooFilter.jar
-
-    root@test15 nofoo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-The commands below show the filter is working.
-
-    root@test15 nofoo> insert foo1 f1 q1 v1
-    root@test15 nofoo> insert noo1 f1 q1 v2
-    root@test15 nofoo> scan
-    noo1 f1:q1 []    v2
-    root@test15 nofoo>
-
-Below, an attempt is made to add the FooFilter to a table that's not configured
-to use the classpath context cx1. This fails until the table is configured to
-use cx1.
-
-    root@test15 nofoo> createtable nofootwo
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    2013-05-03 12:49:35,943 [shell.Shell] ERROR: java.lang.IllegalArgumentException: org.apache.accumulo.test.FooFilter
-    root@test15 nofootwo> config -t nofootwo -s table.classpath.context=cx1
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/client.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/client.md b/docs/src/main/resources/examples/client.md
deleted file mode 100644
index b07ae8e..0000000
--- a/docs/src/main/resources/examples/client.md
+++ /dev/null
@@ -1,81 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Client Examples
----
-
-This document shows how to run the simplest Java examples.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * Flush.java - flushes a table
- * RowOperations.java - reads and writes rows
- * ReadWriteExample.java - creates a table, writes to it, and reads from it
-
-Using the accumulo command, you can run the simple client examples by providing their
-class name and enough arguments to find your Accumulo instance. For example,
-the Flush class will flush a table:
-
-    $ PACKAGE=org.apache.accumulo.examples.simple.client
-    $ bin/accumulo $PACKAGE.Flush -u root -p mypassword -i instance -z zookeeper -t trace
-
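-For reference, the flush that Flush.java performs boils down to a single
-TableOperations call. A minimal sketch, assuming a Connector has already been
-obtained and using a placeholder table name:
-
-    import org.apache.accumulo.core.client.Connector;
-
-    public class FlushSketch {
-      // flushes the table's in-memory data to files in HDFS and waits for completion
-      static void flush(Connector conn, String table) throws Exception {
-        conn.tableOperations().flush(table, null, null, true);
-      }
-    }
-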
-The very simple RowOperations class demonstrates how to read and write rows using the BatchWriter
-and Scanner:
-
-    $ bin/accumulo $PACKAGE.RowOperations -u root -p mypassword -i instance -z zookeeper
-    2013-01-14 14:45:24,738 [client.RowOperations] INFO : This is everything
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,756 [client.RowOperations] INFO : This is row1 and row3
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,765 [client.RowOperations] INFO : This is just row3
-    2013-01-14 14:45:24,769 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-
-To create a table, write to it and read from it:
-
-    $ bin/accumulo $PACKAGE.ReadWriteExample -u root -p mypassword -i instance -z zookeeper --createtable --create --read
-    hello%00; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%01; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%02; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%03; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%04; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%05; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%06; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%07; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%08; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%09; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/combiner.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/combiner.md b/docs/src/main/resources/examples/combiner.md
deleted file mode 100644
index 03841d3..0000000
--- a/docs/src/main/resources/examples/combiner.md
+++ /dev/null
@@ -1,72 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Combiner Example
----
-
-This tutorial uses the following Java class, which can be found in org.apache.accumulo.examples.simple.combiner in the examples-simple module:
-
- * StatsCombiner.java - a combiner that calculates max, min, sum, and count
-
-This is a simple combiner example. To build this example, run Maven and then
-copy the produced jar into the Accumulo lib dir. This is already done in the
-tar distribution.
-
-    $ bin/accumulo shell -u username
-    Enter current password for 'username'@'instance': ***
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable runners
-    username@instance runners> setiter -t runners -p 10 -scan -minc -majc -n decStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: stat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 10
-    username@instance runners> setiter -t runners -p 11 -scan -minc -majc -n hexStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: hstat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 16
-    username@instance runners> insert 123456 name first Joe
-    username@instance runners> insert 123456 stat marathon 240
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    240,240,240,1
-    username@instance runners> insert 123456 stat marathon 230
-    username@instance runners> insert 123456 stat marathon 220
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-    username@instance runners> insert 123456 hstat virtualMarathon 6a
-    username@instance runners> insert 123456 hstat virtualMarathon 6b
-    username@instance runners> scan
-    123456 hstat:virtualMarathon []    6a,6b,d5,2
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-
-In this example a table is created and the example stats combiner is applied to
-the column families stat and hstat. The stats combiner computes min, max, sum, and
-count. It can be configured to use a different base or radix. In the example
-above, the column family stat is configured for base 10 and the column family
-hstat is configured for base 16.
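-
-For reference, the combining logic can be thought of as a reduce over all values
-stored under a column. The sketch below is a simplified, base-10-only
-illustration, not the exact StatsCombiner source; it assumes values are encoded
-as "min,max,sum,count" strings.
-
-    import java.util.Iterator;
-
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.Combiner;
-
-    public class StatsCombinerSketch extends Combiner {
-      @Override
-      public Value reduce(Key key, Iterator<Value> iter) {
-        long min = Long.MAX_VALUE, max = Long.MIN_VALUE, sum = 0, count = 0;
-        while (iter.hasNext()) {
-          // each stored value is either a raw number or a previous "min,max,sum,count" tuple
-          String[] parts = iter.next().toString().split(",");
-          if (parts.length == 1) {
-            long v = Long.parseLong(parts[0]);
-            min = Math.min(min, v);
-            max = Math.max(max, v);
-            sum += v;
-            count++;
-          } else {
-            min = Math.min(min, Long.parseLong(parts[0]));
-            max = Math.max(max, Long.parseLong(parts[1]));
-            sum += Long.parseLong(parts[2]);
-            count += Long.parseLong(parts[3]);
-          }
-        }
-        return new Value((min + "," + max + "," + sum + "," + count).getBytes());
-      }
-    }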

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/compactionStrategy.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/compactionStrategy.md b/docs/src/main/resources/examples/compactionStrategy.md
deleted file mode 100644
index 642c3ea..0000000
--- a/docs/src/main/resources/examples/compactionStrategy.md
+++ /dev/null
@@ -1,67 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Customizing the Compaction Strategy
----
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.tserver.compaction: 
-
- * DefaultCompactionStrategy.java - determines which files to compact based on table.compaction.major.ratio and table.file.max
- * EverythingCompactionStrategy.java - compacts all files
- * SizeLimitCompactionStrategy.java - compacts files no bigger than table.majc.compaction.strategy.opts.sizeLimit
- * TwoTierCompactionStrategy.java - uses default compression for smaller files and table.majc.compaction.strategy.opts.file.large.compress.type for larger files
-
-This is an example of how to configure a compaction strategy. By default Accumulo will always use the DefaultCompactionStrategy, unless
-these steps are taken to change the configuration. Use the strategy and settings that best fit your Accumulo setup. This example shows
-how to configure and test one of the more complicated strategies, the TwoTierCompactionStrategy. Note that this example requires Hadoop
-native libraries built with Snappy in order to use Snappy compression.
-
-To begin, run the command to create a table for testing:
-
-    $ ./bin/accumulo shell -u root -p secret -e "createtable test1"
-
-The command below sets the compression for smaller files and minor compactions for that table.
-
-    $ ./bin/accumulo shell -u root -p secret -e "config -s table.file.compress.type=snappy -t test1"
-
-The commands below will configure the TwoTierCompactionStrategy to use gz compression for files larger than 1M. 
-
-    $ ./bin/accumulo shell -u root -p secret -e "config -s table.majc.compaction.strategy.opts.file.large.compress.threshold=1M -t test1"
-    $ ./bin/accumulo shell -u root -p secret -e "config -s table.majc.compaction.strategy.opts.file.large.compress.type=gz -t test1"
-    $ ./bin/accumulo shell -u root -p secret -e "config -s table.majc.compaction.strategy=org.apache.accumulo.tserver.compaction.TwoTierCompactionStrategy -t test1"
-
-Generate some data and files in order to test the strategy:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance17 -z localhost:2181 -u root -p secret -t test1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20
-    $ ./bin/accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance17 -z localhost:2181 -u root -p secret -t test1 --start 0 --num 11000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20
-    $ ./bin/accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance17 -z localhost:2181 -u root -p secret -t test1 --start 0 --num 12000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20
-    $ ./bin/accumulo shell -u root -p secret -e "flush -t test1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance17 -z localhost:2181 -u root -p secret -t test1 --start 0 --num 13000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20
-    $ ./bin/accumulo shell -u root -p secret -e "flush -t test1"
-
-View the tserver log in <accumulo_home>/logs for the compaction and find the name of the <rfile> that was compacted for your table. Print info about this file using the rfile-info command:
-
-    $ ./bin/accumulo rfile-info <rfile>
-
-Details about the rfile will be printed, and the compression type should match the type used in the compaction:
-
-    Meta block     : RFile.index
-      Raw size             : 512 bytes
-      Compressed size      : 278 bytes
-      Compression type     : gz
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/constraints.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/constraints.md b/docs/src/main/resources/examples/constraints.md
deleted file mode 100644
index 4f23aab..0000000
--- a/docs/src/main/resources/examples/constraints.md
+++ /dev/null
@@ -1,56 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Constraints Example
----
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.constraints in the examples-simple module:
-
- * AlphaNumKeyConstraint.java - a constraint that requires alphanumeric keys
- * NumericValueConstraint.java - a constraint that requires numeric string values
-
-This is an example of how to create a table with constraints. Below, a table is
-created with two example constraints. One constraint does not allow
-non-alphanumeric keys. The other constraint does not allow non-numeric values. Two
-inserts that violate these constraints are attempted and denied. The scan at
-the end shows the inserts were not allowed.
-
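-For reference, a constraint like NumericValueConstraint can be written by
-implementing Accumulo's Constraint interface. The sketch below is illustrative
-only, not the exact example source; it rejects any mutation containing a
-non-numeric value.
-
-    import java.util.Collections;
-    import java.util.List;
-
-    import org.apache.accumulo.core.constraints.Constraint;
-    import org.apache.accumulo.core.data.ColumnUpdate;
-    import org.apache.accumulo.core.data.Mutation;
-
-    public class NumericValueConstraintSketch implements Constraint {
-      private static final short NON_NUMERIC = 1;
-
-      @Override
-      public String getViolationDescription(short violationCode) {
-        return violationCode == NON_NUMERIC ? "Value is not numeric" : null;
-      }
-
-      @Override
-      public List<Short> check(Environment env, Mutation mutation) {
-        for (ColumnUpdate update : mutation.getUpdates()) {
-          for (byte b : update.getValue()) {
-            if (b < '0' || b > '9') {
-              // returning a violation code causes the mutation to be rejected
-              return Collections.singletonList(NON_NUMERIC);
-            }
-          }
-        }
-        return null; // null (or an empty list) means no violations
-      }
-    }
-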
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable testConstraints
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint
-    username@instance testConstraints> insert r1 cf1 cq1 1111
-    username@instance testConstraints> insert r1 cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> insert r1! cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint, violationCode:1, violationDescription:Row was not alpha numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> scan
-    r1 cf1:cq1 []    1111
-    username@instance testConstraints>
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/dirlist.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/dirlist.md b/docs/src/main/resources/examples/dirlist.md
deleted file mode 100644
index 1b6a15c..0000000
--- a/docs/src/main/resources/examples/dirlist.md
+++ /dev/null
@@ -1,118 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo File System Archive
----
-
-This example stores filesystem information in Accumulo. The example stores the information in the following three tables. More information about the table structures can be found at the end of this document.
-
- * directory table : This table stores information about the filesystem directory structure.
- * index table     : This table stores a file name index. It can be used to quickly find files with a given name, suffix, or prefix.
- * data table      : This table stores the file data. Files with duplicate data are only stored once.
-
-This example shows how to use Accumulo to store a file system history. It has the following classes:
-
- * Ingest.java - Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a separate table, and the file data into a third table.
- * QueryUtil.java - Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory names.
- * Viewer.java - Provides a GUI for browsing the file system information stored in Accumulo.
- * FileCount.java - Computes recursive counts over file system information and stores them back into the same Accumulo table.
-
-To begin, ingest some data with Ingest.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest -i instance -z zookeepers -u username -p password --vis exampleVis --chunkSize 100000 /local/username/workspace
-
-This may take some time if there are large files in the /local/username/workspace directory. If you use 0 instead of 100000 on the command line, the ingest will run much faster, but it will not put any file data into Accumulo (the dataTable will be empty).
-Note that running this example will create tables dirTable, indexTable, and dataTable in Accumulo that you should delete when you have completed the example.
-If you modify a file or add new files in the directory ingested (e.g. /local/username/workspace), you can run Ingest again to add new information into the Accumulo tables.
-
-To browse the data ingested, use Viewer.java. Be sure to give the "username" user the authorizations to see the data; in this case, run
-
-    $ ./bin/accumulo shell -u root -e 'setauths -u username -s exampleVis'
-
-then run the Viewer:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer -i instance -z zookeepers -u username -p password -t dirTable --dataTable dataTable --auths exampleVis --path /local/username/workspace
-
-To list the contents of specific directories, use QueryUtil.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username/workspace
-
-To perform searches on file or directory names, also use QueryUtil.java. Search terms must contain no more than one wild card and cannot contain "/".
-*Note* these queries run on the _indexTable_ table instead of the dirTable table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path filename --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path '*jar' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*jar' --search
-
-To count the number of direct children (directories and files) and descendants (children and children's descendants, directories and files), run the FileCount over the dirTable table.
-The results are written back to the same table. FileCount reads from and writes to Accumulo. This requires scan authorizations for the read and a visibility for the data written.
-In this example, the authorizations and visibility are set to the same value, exampleVis. See the [visibility example][vis] for more information on visibility and authorizations.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.FileCount -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis
-
-## Directory Table
-
-Here is an illustration of what data looks like in the directory table:
-
-    row colf:colq [vis]	value
-    000 dir:exec [exampleVis]    true
-    000 dir:hidden [exampleVis]    false
-    000 dir:lastmod [exampleVis]    1291996886000
-    000 dir:length [exampleVis]    1666
-    001/local dir:exec [exampleVis]    true
-    001/local dir:hidden [exampleVis]    false
-    001/local dir:lastmod [exampleVis]    1304945270000
-    001/local dir:length [exampleVis]    272
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:exec [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:hidden [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:lastmod [exampleVis]    1308746481000
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:length [exampleVis]    9192
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:md5 [exampleVis]    274af6419a3c4c4a259260ac7017cbf1
-
-The rows are of the form depth + path, where depth is the number of slashes ("/") in the path padded to 3 digits. This is so that all the children of a directory appear as consecutive keys in Accumulo; without the depth, you would, for example, see all the subdirectories of /local before you saw /usr.
-For directories the column family is "dir". For files the column family is Long.MAX_VALUE - lastModified in bytes rather than string format so that newer versions sort earlier.
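-
-A small sketch of how such a row could be built (illustrative only, not the exact Ingest source):
-
-    public class DirRowSketch {
-      // row = number of slashes in the path, zero-padded to 3 digits, followed by the path
-      static String buildRow(String path) {
-        int depth = 0;
-        for (char c : path.toCharArray())
-          if (c == '/')
-            depth++;
-        return String.format("%03d%s", depth, path);
-      }
-
-      public static void main(String[] args) {
-        System.out.println(buildRow("/local/Accumulo.README")); // prints 002/local/Accumulo.README
-      }
-    }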
-
-## Index Table
-
-Here is an illustration of what data looks like in the index table:
-
-    row colf:colq [vis]
-    fAccumulo.README i:002/local/Accumulo.README [exampleVis]
-    flocal i:001/local [exampleVis]
-    rEMDAER.olumuccA i:002/local/Accumulo.README [exampleVis]
-    rlacol i:001/local [exampleVis]
-
-The values of the index table are null. The rows are of the form "f" + filename or "r" + reverse file name. This is to enable searches with wildcards at the beginning, middle, or end.
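-
-A small sketch of how the forward and reverse index rows could be built (illustrative only):
-
-    public class IndexRowSketch {
-      // forward row: "f" + file name; reverse row: "r" + file name reversed
-      static String forwardRow(String fileName) {
-        return "f" + fileName;
-      }
-
-      static String reverseRow(String fileName) {
-        return "r" + new StringBuilder(fileName).reverse();
-      }
-
-      public static void main(String[] args) {
-        System.out.println(forwardRow("Accumulo.README")); // fAccumulo.README
-        System.out.println(reverseRow("Accumulo.README")); // rEMDAER.olumuccA
-      }
-    }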
-
-## Data Table
-
-Here is an illustration of what data looks like in the data table:
-
-    row colf:colq [vis]	value
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00filext [exampleVis]    README
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00name [exampleVis]    /local/Accumulo.README
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x00 [exampleVis]    *******************************************************************************\x0A1. Building\x0A\x0AIn the normal tarball release of accumulo, [truncated]
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x01 [exampleVis]
-
-The rows are the md5 hash of the file. Some column family : column qualifier pairs are "refs" : hash of file name + null byte + property name, in which case the value is the property value. There can be multiple references to the same file, which are distinguished by the hash of the file name.
-Other column family : column qualifier pairs are "~chunk" : chunk size in bytes + chunk number in bytes, in which case the value is the bytes for that chunk of the file. There is an end of file data marker whose chunk number is the number of chunks for the file and whose value is empty.
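-
-A small sketch of how a chunk column qualifier could be built (illustrative only, not the exact FileDataIngest source):
-
-    import java.nio.ByteBuffer;
-
-    public class ChunkQualifierSketch {
-      // qualifier = 4-byte chunk size followed by 4-byte chunk number, both big-endian
-      static byte[] buildQualifier(int chunkSize, int chunkNumber) {
-        return ByteBuffer.allocate(8).putInt(chunkSize).putInt(chunkNumber).array();
-      }
-
-      public static void main(String[] args) {
-        byte[] q = buildQualifier(1000000, 0); // yields \x00\x0FB@\x00\x00\x00\x00 as seen above
-        for (byte b : q)
-          System.out.printf("%02x", b);
-        System.out.println();
-      }
-    }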
-
-There may exist multiple copies of the same file (with the same md5 hash) with different chunk sizes or different visibilities. There is an iterator that can be set on the data table that combines these copies into a single copy with a visibility taken from the visibilities of the file references, e.g. (vis from ref1)|(vis from ref2).
-
-[vis]: visibility.md

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/export.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/export.md b/docs/src/main/resources/examples/export.md
deleted file mode 100644
index beb7b99..0000000
--- a/docs/src/main/resources/examples/export.md
+++ /dev/null
@@ -1,93 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Export/Import Example
----
-
-Accumulo provides a mechanism to export and import tables. This example shows
-how to use this feature.
-
-The shell session below shows creating a table, inserting data, and exporting
-the table. A table must be offline to export it, and it should remain offline
-for the duration of the distcp. An easy way to take a table offline without
-interrupting access to it is to clone it and take the clone offline.
-
-    root@test15> createtable table1
-    root@test15 table1> insert a cf1 cq1 v1
-    root@test15 table1> insert h cf1 cq1 v2
-    root@test15 table1> insert z cf1 cq1 v3
-    root@test15 table1> insert z cf1 cq2 v4
-    root@test15 table1> addsplits -t table1 b r
-    root@test15 table1> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15> config -t table1 -s table.split.threshold=100M
-    root@test15 table1> clonetable table1 table1_exp
-    root@test15 table1> offline table1_exp
-    root@test15 table1> exporttable -t table1_exp /tmp/table1_export
-    root@test15 table1> quit
-
-After executing the export command, a few files are created in the HDFS directory.
-One of the files is a list of files to distcp, as shown below.
-
-    $ hadoop fs -ls /tmp/table1_export
-    Found 2 items
-    -rw-r--r--   3 user supergroup        162 2012-07-25 09:56 /tmp/table1_export/distcp.txt
-    -rw-r--r--   3 user supergroup        821 2012-07-25 09:56 /tmp/table1_export/exportMetadata.zip
-    $ hadoop fs -cat /tmp/table1_export/distcp.txt
-    hdfs://n1.example.com:6093/accumulo/tables/3/default_tablet/F0000000.rf
-    hdfs://n1.example.com:6093/tmp/table1_export/exportMetadata.zip
-
-Before the table can be imported, it must be copied using distcp. After the
-distcp completes, the cloned table may be deleted.
-
-    $ hadoop distcp -f /tmp/table1_export/distcp.txt /tmp/table1_export_dest
-
-The Accumulo shell session below shows importing the table and inspecting it.
-The data, splits, config, and logical time information for the table were
-preserved.
-
-    root@test15> importtable table1_copy /tmp/table1_export_dest
-    root@test15> table table1_copy
-    root@test15 table1_copy> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15 table1_copy> getsplits -t table1_copy
-    b
-    r
-    root@test15> config -t table1_copy -f split
-    ---------+--------------------------+-------------------------------------------
-    SCOPE    | NAME                     | VALUE
-    ---------+--------------------------+-------------------------------------------
-    default  | table.split.threshold .. | 1G
-    table    |    @override ........... | 100M
-    ---------+--------------------------+-------------------------------------------
-    root@test15> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    table1_copy          =>         5
-    trace                =>         1
-    root@test15 table1_copy> scan -t accumulo.metadata -b 5 -c srv:time
-    5;b srv:time []    M1343224500467
-    5;r srv:time []    M1343224500467
-    5< srv:time []    M1343224500467
-
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/filedata.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/filedata.md b/docs/src/main/resources/examples/filedata.md
deleted file mode 100644
index 6de2f0a..0000000
--- a/docs/src/main/resources/examples/filedata.md
+++ /dev/null
@@ -1,51 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo File System Archive Example (Data Only)
----
-
-This example archives file data into an Accumulo table. Files with duplicate data are only stored once.
-The example has the following classes:
-
- * CharacterHistogram - A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. An example use of the ChunkInputFormat.
- * ChunkCombiner - An Iterator that dedupes file data and sets their visibilities to a combined visibility based on current references to the file data.
- * ChunkInputFormat - An Accumulo InputFormat that provides keys containing file info (List<Entry<Key,Value>>) and values with an InputStream over the file (ChunkInputStream).
- * ChunkInputStream - An input stream over file data stored in Accumulo.
- * FileDataIngest - Takes a list of files and archives them into Accumulo keyed on hashes of the files.
- * FileDataQuery - Retrieves file data based on the hash of the file. (Used by the dirlist.Viewer.)
- * KeyUtil - A utility for creating and parsing null-byte separated strings into/from Text objects.
- * VisibilityCombiner - A utility for merging visibilities into the form (VIS1)|(VIS2)|...
-
-This example is coupled with the [dirlist example][dirlist].
-
-If you haven't already run the [dirlist example][dirlist], ingest a file with FileDataIngest.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --chunk 1000 /path/to/accumulo/README.md
-
-Open the Accumulo shell and look at the data. The row is the MD5 hash of the file, which you can verify by running a command such as 'md5sum' on the file.
-
-    > scan -t dataTable
-
-Run the CharacterHistogram MapReduce to add some information about the file.
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
-
-Scan again to see the histogram stored in the 'info' column family.
-
-    > scan -t dataTable
-
-[dirlist]: dirlist.md
\ No newline at end of file


[2/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
deleted file mode 100644
index ba1e32e..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Index.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shard;
-
-import java.io.File;
-import java.io.FileReader;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * This program indexes a set of documents given on the command line into a shard table.
- *
- * What it writes to the table is row = partition id, column family = term, column qualifier = document id.
- *
- * See docs/examples/README.shard for instructions.
- */
-
-public class Index {
-
-  static Text genPartition(int partition) {
-    return new Text(String.format("%08x", Math.abs(partition)));
-  }
-
-  public static void index(int numPartitions, Text docId, String doc, String splitRegex, BatchWriter bw) throws Exception {
-
-    String[] tokens = doc.split(splitRegex);
-
-    Text partition = genPartition(doc.hashCode() % numPartitions);
-
-    Mutation m = new Mutation(partition);
-
-    HashSet<String> tokensSeen = new HashSet<>();
-
-    for (String token : tokens) {
-      token = token.toLowerCase();
-
-      if (!tokensSeen.contains(token)) {
-        tokensSeen.add(token);
-        m.put(new Text(token), docId, new Value(new byte[0]));
-      }
-    }
-
-    if (m.size() > 0)
-      bw.addMutation(m);
-  }
-
-  public static void index(int numPartitions, File src, String splitRegex, BatchWriter bw) throws Exception {
-    if (src.isDirectory()) {
-      File[] files = src.listFiles();
-      if (files != null) {
-        for (File child : files) {
-          index(numPartitions, child, splitRegex, bw);
-        }
-      }
-    } else {
-      FileReader fr = new FileReader(src);
-
-      StringBuilder sb = new StringBuilder();
-
-      char data[] = new char[4096];
-      int len;
-      while ((len = fr.read(data)) != -1) {
-        sb.append(data, 0, len);
-      }
-
-      fr.close();
-
-      index(numPartitions, new Text(src.getAbsolutePath()), sb.toString(), splitRegex, bw);
-    }
-
-  }
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--partitions", required = true, description = "the number of shards to create")
-    int partitions;
-    @Parameter(required = true, description = "<file> { <file> ... }")
-    List<String> files = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(Index.class.getName(), args, bwOpts);
-
-    String splitRegex = "\\W+";
-
-    BatchWriter bw = opts.getConnector().createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-    for (String filename : opts.files) {
-      index(opts.partitions, new File(filename), splitRegex, bw);
-    }
-    bw.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
deleted file mode 100644
index 13adcca..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Query.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shard;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.sample.SamplerConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.IntersectingIterator;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
- *
- * See docs/examples/README.shard for instructions.
- */
-
-public class Query {
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(description = " term { <term> ... }")
-    List<String> terms = new ArrayList<>();
-
-    @Parameter(names = {"--sample"}, description = "Do queries against sample, useful when sample is built using column qualifier")
-    private boolean useSample = false;
-
-    @Parameter(names = {"--sampleCutoff"},
-        description = "Use sample data to determine if a query might return a number of documents over the cutoff.  This check is per tablet.")
-    private Integer sampleCutoff = null;
-  }
-
-  public static List<String> query(BatchScanner bs, List<String> terms, Integer cutoff) {
-
-    Text columns[] = new Text[terms.size()];
-    int i = 0;
-    for (String term : terms) {
-      columns[i++] = new Text(term);
-    }
-
-    IteratorSetting ii;
-
-    if (cutoff != null) {
-      ii = new IteratorSetting(20, "ii", CutoffIntersectingIterator.class);
-      CutoffIntersectingIterator.setCutoff(ii, cutoff);
-    } else {
-      ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
-    }
-
-    IntersectingIterator.setColumnFamilies(ii, columns);
-    bs.addScanIterator(ii);
-    bs.setRanges(Collections.singleton(new Range()));
-    List<String> result = new ArrayList<>();
-    for (Entry<Key,Value> entry : bs) {
-      result.add(entry.getKey().getColumnQualifier().toString());
-    }
-    return result;
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchScannerOpts bsOpts = new BatchScannerOpts();
-    opts.parseArgs(Query.class.getName(), args, bsOpts);
-    Connector conn = opts.getConnector();
-    BatchScanner bs = conn.createBatchScanner(opts.getTableName(), opts.auths, bsOpts.scanThreads);
-    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-    if (opts.useSample) {
-      SamplerConfiguration samplerConfig = conn.tableOperations().getSamplerConfiguration(opts.getTableName());
-      CutoffIntersectingIterator.validateSamplerConfig(conn.tableOperations().getSamplerConfiguration(opts.getTableName()));
-      bs.setSamplerConfiguration(samplerConfig);
-    }
-    for (String entry : query(bs, opts.terms, opts.sampleCutoff))
-      System.out.println("  " + entry);
-
-    bs.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
deleted file mode 100644
index dbcbe5f..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/Reverse.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shard;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * The program reads an accumulo table written by {@link Index} and writes out to another table. It writes out a mapping of documents to terms. The document to
- * term mapping is used by {@link ContinuousQuery}.
- *
- * See docs/examples/README.shard for instructions.
- */
-
-public class Reverse {
-
-  static class Opts extends ClientOpts {
-    @Parameter(names = "--shardTable")
-    String shardTable = "shard";
-    @Parameter(names = "--doc2Term")
-    String doc2TermTable = "doc2Term";
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(Reverse.class.getName(), args, scanOpts, bwOpts);
-
-    Connector conn = opts.getConnector();
-
-    Scanner scanner = conn.createScanner(opts.shardTable, opts.auths);
-    scanner.setBatchSize(scanOpts.scanBatchSize);
-    BatchWriter bw = conn.createBatchWriter(opts.doc2TermTable, bwOpts.getBatchWriterConfig());
-
-    for (Entry<Key,Value> entry : scanner) {
-      Key key = entry.getKey();
-      Mutation m = new Mutation(key.getColumnQualifier());
-      m.put(key.getColumnFamily(), new Text(), new Value(new byte[0]));
-      bw.addMutation(m);
-    }
-
-    bw.close();
-
-  }
-}
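
The removed Reverse program walks the shard table, where each key carries the term in its column family and the document id in its column qualifier, and writes the transposed mapping into doc2Term so that ContinuousQuery can pick random documents and look up their terms. The whole job is a per-entry key rearrangement; a hedged sketch of that single step (class name is illustrative):

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class ReverseSketch {
      // Turns one shard-table entry (row=shardId, cf=term, cq=docId)
      // into a doc2Term entry (row=docId, cf=term, empty qualifier and value).
      static Mutation transpose(Key shardKey) {
        Mutation m = new Mutation(shardKey.getColumnQualifier()); // document id becomes the row
        m.put(shardKey.getColumnFamily(), new Text(), new Value(new byte[0])); // term becomes the column family
        return m;
      }
    }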

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
deleted file mode 100644
index 4395fe7..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/DebugCommand.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shell;
-
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.shell.Shell;
-import org.apache.accumulo.shell.Shell.Command;
-import org.apache.commons.cli.CommandLine;
-
-public class DebugCommand extends Command {
-
-  @Override
-  public int execute(String fullCommand, CommandLine cl, Shell shellState) throws Exception {
-    Set<String> lines = new TreeSet<>();
-    lines.add("This is a test");
-    shellState.printLines(lines.iterator(), true);
-    return 0;
-  }
-
-  @Override
-  public String description() {
-    return "prints a message to test extension feature";
-  }
-
-  @Override
-  public int numArgs() {
-    return 0;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/ExampleShellExtension.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/ExampleShellExtension.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/ExampleShellExtension.java
deleted file mode 100644
index bcd6690..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shell/ExampleShellExtension.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shell;
-
-import org.apache.accumulo.shell.Shell.Command;
-import org.apache.accumulo.shell.ShellExtension;
-
-import com.google.auto.service.AutoService;
-
-@AutoService(ShellExtension.class)
-public class ExampleShellExtension extends ShellExtension {
-
-  @Override
-  public String getExtensionName() {
-    return "ExampleShellExtension";
-  }
-
-  @Override
-  public Command[] getCommands() {
-    return new Command[] {new DebugCommand()};
-  }
-
-}
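
The removed shell extension shows the whole extension surface: @AutoService(ShellExtension.class) writes the META-INF/services entry that lets the shell discover the extension through Java's ServiceLoader, and every Command returned from getCommands() becomes an extra shell command with its own description and argument count. Additional commands only need to implement the same three methods; a hedged sketch of a second, purely illustrative command that could be returned alongside DebugCommand:

    import java.util.Collections;

    import org.apache.accumulo.shell.Shell;
    import org.apache.accumulo.shell.Shell.Command;
    import org.apache.commons.cli.CommandLine;

    public class HelloCommand extends Command {

      @Override
      public int execute(String fullCommand, CommandLine cl, Shell shellState) throws Exception {
        // Prints a single line through the shell, the same way DebugCommand does.
        shellState.printLines(Collections.singletonList("hello from a shell extension").iterator(), true);
        return 0;
      }

      @Override
      public String description() {
        return "prints a greeting to demonstrate the extension point";
      }

      @Override
      public int numArgs() {
        return 0;
      }
    }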

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraintTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraintTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraintTest.java
deleted file mode 100644
index 8ef3f0f..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraintTest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableList;
-
-public class AlphaNumKeyConstraintTest {
-
-  private AlphaNumKeyConstraint ankc = new AlphaNumKeyConstraint();
-
-  @Test
-  public void test() {
-    Mutation goodMutation = new Mutation(new Text("Row1"));
-    goodMutation.put(new Text("Colf2"), new Text("ColQ3"), new Value("value".getBytes()));
-    assertNull(ankc.check(null, goodMutation));
-
-    // Check that violations are in row, cf, cq order
-    Mutation badMutation = new Mutation(new Text("Row#1"));
-    badMutation.put(new Text("Colf$2"), new Text("Colq%3"), new Value("value".getBytes()));
-    assertEquals(ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
-        ankc.check(null, badMutation));
-  }
-
-  @Test
-  public void testGetViolationDescription() {
-    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
-    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
-    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
-    assertNull(ankc.getViolationDescription((short) 4));
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraintTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraintTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraintTest.java
deleted file mode 100644
index 7d1fc49..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraintTest.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-public class NumericValueConstraintTest {
-
-  private NumericValueConstraint nvc = new NumericValueConstraint();
-
-  @Test
-  public void testCheck() {
-    Mutation goodMutation = new Mutation(new Text("r"));
-    goodMutation.put(new Text("cf"), new Text("cq"), new Value("1234".getBytes()));
-    assertNull(nvc.check(null, goodMutation));
-
-    // Check that multiple bad mutations result in one violation only
-    Mutation badMutation = new Mutation(new Text("r"));
-    badMutation.put(new Text("cf"), new Text("cq"), new Value("foo1234".getBytes()));
-    badMutation.put(new Text("cf2"), new Text("cq2"), new Value("foo1234".getBytes()));
-    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE, Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
-  }
-
-  @Test
-  public void testGetViolationDescription() {
-    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE, nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
-    assertNull(nvc.getViolationDescription((short) 2));
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
deleted file mode 100644
index 40f4bb9..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkCombinerTest.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import junit.framework.TestCase;
-
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-
-public class ChunkCombinerTest extends TestCase {
-
-  public static class MapIterator implements SortedKeyValueIterator<Key,Value> {
-    private Iterator<Entry<Key,Value>> iter;
-    private Entry<Key,Value> entry;
-    Collection<ByteSequence> columnFamilies;
-    private SortedMap<Key,Value> map;
-    private Range range;
-
-    @Override
-    public MapIterator deepCopy(IteratorEnvironment env) {
-      return new MapIterator(map);
-    }
-
-    private MapIterator(SortedMap<Key,Value> map) {
-      this.map = map;
-      iter = map.entrySet().iterator();
-      this.range = new Range();
-      if (iter.hasNext())
-        entry = iter.next();
-      else
-        entry = null;
-    }
-
-    @Override
-    public Key getTopKey() {
-      return entry.getKey();
-    }
-
-    @Override
-    public Value getTopValue() {
-      return entry.getValue();
-    }
-
-    @Override
-    public boolean hasTop() {
-      return entry != null;
-    }
-
-    @Override
-    public void next() throws IOException {
-      entry = null;
-      while (iter.hasNext()) {
-        entry = iter.next();
-        if (columnFamilies.size() > 0 && !columnFamilies.contains(entry.getKey().getColumnFamilyData())) {
-          entry = null;
-          continue;
-        }
-        if (range.afterEndKey(entry.getKey()))
-          entry = null;
-        break;
-      }
-    }
-
-    @Override
-    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-      if (!inclusive) {
-        throw new IllegalArgumentException("can only do inclusive colf filtering");
-      }
-      this.columnFamilies = columnFamilies;
-      this.range = range;
-
-      Key key = range.getStartKey();
-      if (key == null) {
-        key = new Key();
-      }
-
-      iter = map.tailMap(key).entrySet().iterator();
-      next();
-      while (hasTop() && range.beforeStartKey(getTopKey())) {
-        next();
-      }
-    }
-
-    @Override
-    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-  }
-
-  private TreeMap<Key,Value> row1;
-  private TreeMap<Key,Value> row2;
-  private TreeMap<Key,Value> row3;
-  private TreeMap<Key,Value> allRows;
-
-  private TreeMap<Key,Value> cRow1;
-  private TreeMap<Key,Value> cRow2;
-  private TreeMap<Key,Value> cRow3;
-  private TreeMap<Key,Value> allCRows;
-
-  private TreeMap<Key,Value> cOnlyRow1;
-  private TreeMap<Key,Value> cOnlyRow2;
-  private TreeMap<Key,Value> cOnlyRow3;
-  private TreeMap<Key,Value> allCOnlyRows;
-
-  private TreeMap<Key,Value> badrow;
-
-  @Override
-  protected void setUp() {
-    row1 = new TreeMap<>();
-    row2 = new TreeMap<>();
-    row3 = new TreeMap<>();
-    allRows = new TreeMap<>();
-
-    cRow1 = new TreeMap<>();
-    cRow2 = new TreeMap<>();
-    cRow3 = new TreeMap<>();
-    allCRows = new TreeMap<>();
-
-    cOnlyRow1 = new TreeMap<>();
-    cOnlyRow2 = new TreeMap<>();
-    cOnlyRow3 = new TreeMap<>();
-    allCOnlyRows = new TreeMap<>();
-
-    badrow = new TreeMap<>();
-
-    String refs = FileDataIngest.REFS_CF.toString();
-    String fileext = FileDataIngest.REFS_FILE_EXT;
-    String filename = FileDataIngest.REFS_ORIG_FILE;
-    String chunk_cf = FileDataIngest.CHUNK_CF.toString();
-
-    row1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
-    row1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
-    row1.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
-    row1.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V1".getBytes()));
-    row1.put(new Key("row1", chunk_cf, "0001", "A"), new Value("V2".getBytes()));
-    row1.put(new Key("row1", chunk_cf, "0001", "B"), new Value("V2".getBytes()));
-
-    cRow1.put(new Key("row1", refs, "hash1\0" + fileext, "C"), new Value("jpg".getBytes()));
-    cRow1.put(new Key("row1", refs, "hash1\0" + filename, "D"), new Value("foo1.jpg".getBytes()));
-    cRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
-    cRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
-
-    cOnlyRow1.put(new Key("row1", chunk_cf, "0000", "(C)|(D)"), new Value("V1".getBytes()));
-    cOnlyRow1.put(new Key("row1", chunk_cf, "0001", "(C)|(D)"), new Value("V2".getBytes()));
-
-    row2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
-    row2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
-    row2.put(new Key("row2", chunk_cf, "0000", "A|B"), new Value("V1".getBytes()));
-    row2.put(new Key("row2", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
-    row2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
-    row2.put(new Key("row2a", chunk_cf, "0000", "C"), new Value("V1".getBytes()));
-
-    cRow2.put(new Key("row2", refs, "hash1\0" + fileext, "A"), new Value("jpg".getBytes()));
-    cRow2.put(new Key("row2", refs, "hash1\0" + filename, "B"), new Value("foo1.jpg".getBytes()));
-    cRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
-
-    cOnlyRow2.put(new Key("row2", chunk_cf, "0000", "(A)|(B)"), new Value("V1".getBytes()));
-
-    row3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
-    row3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
-    row3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
-    row3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
-    row3.put(new Key("row3", chunk_cf, "0000", "(A&B)|(C&(D|E))", 10), new Value("V1".getBytes()));
-    row3.put(new Key("row3", chunk_cf, "0000", "A&B", 20), new Value("V1".getBytes()));
-    row3.put(new Key("row3", chunk_cf, "0000", "(A&B)", 10), new Value("V1".getBytes()));
-    row3.put(new Key("row3", chunk_cf, "0000", "(F|G)&(D|E)", 10), new Value("V1".getBytes()));
-
-    cRow3.put(new Key("row3", refs, "hash1\0w", "(A&B)|(C&(D|E))"), new Value("".getBytes()));
-    cRow3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
-    cRow3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
-    cRow3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
-    cRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
-
-    cOnlyRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
-
-    badrow.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
-    badrow.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V2".getBytes()));
-
-    allRows.putAll(row1);
-    allRows.putAll(row2);
-    allRows.putAll(row3);
-
-    allCRows.putAll(cRow1);
-    allCRows.putAll(cRow2);
-    allCRows.putAll(cRow3);
-
-    allCOnlyRows.putAll(cOnlyRow1);
-    allCOnlyRows.putAll(cOnlyRow2);
-    allCOnlyRows.putAll(cOnlyRow3);
-  }
-
-  private static final Collection<ByteSequence> emptyColfs = new HashSet<>();
-
-  public void test1() throws IOException {
-    runTest(false, allRows, allCRows, emptyColfs);
-    runTest(true, allRows, allCRows, emptyColfs);
-    runTest(false, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
-    runTest(true, allRows, allCOnlyRows, Collections.singleton(FileDataIngest.CHUNK_CF_BS));
-
-    try {
-      runTest(true, badrow, null, emptyColfs);
-      assertNotNull(null);
-    } catch (RuntimeException e) {
-      assertNull(null);
-    }
-  }
-
-  private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Value> result, Collection<ByteSequence> cols) throws IOException {
-    MapIterator src = new MapIterator(source);
-    SortedKeyValueIterator<Key,Value> iter = new ChunkCombiner();
-    iter.init(src, null, null);
-    iter = iter.deepCopy(null);
-    iter.seek(new Range(), cols, true);
-
-    TreeMap<Key,Value> seen = new TreeMap<>();
-
-    while (iter.hasTop()) {
-      assertFalse("already contains " + iter.getTopKey(), seen.containsKey(iter.getTopKey()));
-      seen.put(new Key(iter.getTopKey()), new Value(iter.getTopValue()));
-
-      if (reseek)
-        iter.seek(new Range(iter.getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL), true, null, true), cols, true);
-      else
-        iter.next();
-    }
-
-    assertEquals(result, seen);
-  }
-}
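
The test above drives ChunkCombiner directly through the SortedKeyValueIterator interface (init, deepCopy, seek, next). On a live table the combiner is instead attached as a table iterator through TableOperations; a hedged sketch against the example class as it existed before this removal (priority and iterator name are illustrative):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.examples.simple.filedata.ChunkCombiner;

    public class AttachChunkCombinerSketch {
      // Attaches ChunkCombiner so chunk copies with different visibilities are merged at scan time.
      static void attach(Connector conn, String table) throws Exception {
        IteratorSetting setting = new IteratorSetting(20, "chunkcombiner", ChunkCombiner.class);
        conn.tableOperations().attachIterator(table, setting);
      }
    }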

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
deleted file mode 100644
index 2796d47..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStreamTest.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyValue;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.hadoop.io.Text;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ChunkInputStreamTest {
-  private static final Logger log = LoggerFactory.getLogger(ChunkInputStream.class);
-  private List<Entry<Key,Value>> data;
-  private List<Entry<Key,Value>> baddata;
-  private List<Entry<Key,Value>> multidata;
-
-  @Before
-  public void setupData() {
-    data = new ArrayList<>();
-    addData(data, "a", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "a", "refs", "id\0name", "A&B", "name");
-    addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "a", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "refs", "id\0ext", "A&B", "ext");
-    addData(data, "b", "refs", "id\0name", "A&B", "name");
-    addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
-    addData(data, "b", "~chunk", 100, 1, "A&B", "");
-    addData(data, "b", "~chunk", 100, 1, "B&C", "");
-    addData(data, "b", "~chunk", 100, 1, "D", "");
-    addData(data, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 1, "A&B", "asdfjkl;");
-    addData(data, "c", "~chunk", 100, 2, "A&B", "");
-    addData(data, "d", "~chunk", 100, 0, "A&B", "");
-    addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(data, "e", "~chunk", 100, 1, "A&B", "");
-    baddata = new ArrayList<>();
-    addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
-    addData(baddata, "b", "~chunk", 100, 2, "C", "");
-    addData(baddata, "c", "~chunk", 100, 0, "D", "asdfjkl;");
-    addData(baddata, "c", "~chunk", 100, 2, "E", "");
-    addData(baddata, "d", "~chunk", 100, 0, "F", "asdfjkl;");
-    addData(baddata, "d", "~chunk", 100, 1, "G", "");
-    addData(baddata, "d", "~zzzzz", "colq", "H", "");
-    addData(baddata, "e", "~chunk", 100, 0, "I", "asdfjkl;");
-    addData(baddata, "e", "~chunk", 100, 1, "J", "");
-    addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
-    addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
-    addData(baddata, "g", "~chunk", 100, 0, "L", "");
-    multidata = new ArrayList<>();
-    addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
-    addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 0, "B&C", "asdfjkl;");
-    addData(multidata, "b", "~chunk", 200, 1, "B&C", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
-  }
-
-  private static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
-  }
-
-  private static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
-    Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
-    chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
-  }
-
-  @Test
-  public void testExceptionOnMultipleSetSourceWithoutClose() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(data.iterator());
-    pi = new PeekingIterator<>(data.iterator());
-    cis.setSource(pi);
-    try {
-      cis.setSource(pi);
-      fail();
-    } catch (IOException e) {
-      /* expected */
-    }
-    cis.close();
-  }
-
-  @Test
-  public void testExceptionOnGetVisBeforeClose() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(data.iterator());
-
-    cis.setSource(pi);
-    try {
-      cis.getVisibilities();
-      fail();
-    } catch (RuntimeException e) {
-      /* expected */
-    }
-    cis.close();
-    cis.getVisibilities();
-  }
-
-  @Test
-  public void testReadIntoBufferSmallerThanChunks() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[5];
-
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(data.iterator());
-
-    cis.setSource(pi);
-    int read;
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "asdfj");
-    assertEquals(read = cis.read(b), 3);
-    assertEquals(new String(b, 0, read), "kl;");
-    assertEquals(read = cis.read(b), -1);
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "qwert");
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "yuiop");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "asdfj");
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "kl;as");
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "dfjkl");
-    assertEquals(read = cis.read(b), 1);
-    assertEquals(new String(b, 0, read), ";");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 5);
-    assertEquals(new String(b, 0, read), "asdfj");
-    assertEquals(read = cis.read(b), 3);
-    assertEquals(new String(b, 0, read), "kl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    assertFalse(pi.hasNext());
-  }
-
-  @Test
-  public void testReadIntoBufferLargerThanChunks() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(data.iterator());
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 10);
-    assertEquals(new String(b, 0, read), "qwertyuiop");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 16);
-    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-
-    assertFalse(pi.hasNext());
-  }
-
-  private static void assumeExceptionOnRead(ChunkInputStream cis, byte[] b) {
-    try {
-      assertEquals(0, cis.read(b));
-      fail();
-    } catch (IOException e) {
-      log.debug("EXCEPTION {}", e.getMessage());
-      // expected, ignore
-    }
-  }
-
-  private static void assumeExceptionOnClose(ChunkInputStream cis) {
-    try {
-      cis.close();
-      fail();
-    } catch (IOException e) {
-      log.debug("EXCEPTION {}", e.getMessage());
-      // expected, ignore
-    }
-  }
-
-  @Test
-  public void testBadData() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(baddata.iterator());
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assumeExceptionOnClose(cis);
-    // can still get visibilities after exception -- bad?
-    assertEquals(cis.getVisibilities().toString(), "[A]");
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assumeExceptionOnClose(cis);
-    assertEquals(cis.getVisibilities().toString(), "[B, C]");
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assumeExceptionOnClose(cis);
-    assertEquals(cis.getVisibilities().toString(), "[D, E]");
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[F, G]");
-    cis.close();
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    cis.close();
-    assertEquals(cis.getVisibilities().toString(), "[I, J]");
-
-    try {
-      cis.setSource(pi);
-      fail();
-    } catch (IOException e) {
-      // expected, ignore
-    }
-    assumeExceptionOnClose(cis);
-    assertEquals(cis.getVisibilities().toString(), "[K]");
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[L]");
-    cis.close();
-
-    assertFalse(pi.hasNext());
-
-    pi = new PeekingIterator<>(baddata.iterator());
-    cis.setSource(pi);
-    assumeExceptionOnClose(cis);
-  }
-
-  @Test
-  public void testBadDataWithoutClosing() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(baddata.iterator());
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    // can still get visibilities after exception -- bad?
-    assertEquals(cis.getVisibilities().toString(), "[A]");
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assertEquals(cis.getVisibilities().toString(), "[B, C]");
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assertEquals(cis.getVisibilities().toString(), "[D, E]");
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[F, G]");
-    cis.close();
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assertEquals(cis.getVisibilities().toString(), "[I, J]");
-
-    try {
-      cis.setSource(pi);
-      fail();
-    } catch (IOException e) {
-      // expected, ignore
-    }
-    assertEquals(cis.getVisibilities().toString(), "[K]");
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), -1);
-    assertEquals(cis.getVisibilities().toString(), "[L]");
-    cis.close();
-
-    assertFalse(pi.hasNext());
-
-    pi = new PeekingIterator<>(baddata.iterator());
-    cis.setSource(pi);
-    assumeExceptionOnClose(cis);
-  }
-
-  @Test
-  public void testMultipleChunkSizes() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    byte[] b = new byte[20];
-    int read;
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(multidata.iterator());
-
-    b = new byte[20];
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-
-    cis.setSource(pi);
-    assumeExceptionOnRead(cis, b);
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-
-    cis.setSource(pi);
-    assertEquals(read = cis.read(b), 8);
-    assertEquals(new String(b, 0, read), "asdfjkl;");
-    assertEquals(read = cis.read(b), -1);
-    cis.close();
-    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C]");
-
-    assertFalse(pi.hasNext());
-  }
-
-  @Test
-  public void testSingleByteRead() throws IOException {
-    ChunkInputStream cis = new ChunkInputStream();
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(data.iterator());
-
-    cis.setSource(pi);
-    assertEquals((byte) 'a', (byte) cis.read());
-    assertEquals((byte) 's', (byte) cis.read());
-    assertEquals((byte) 'd', (byte) cis.read());
-    assertEquals((byte) 'f', (byte) cis.read());
-    assertEquals((byte) 'j', (byte) cis.read());
-    assertEquals((byte) 'k', (byte) cis.read());
-    assertEquals((byte) 'l', (byte) cis.read());
-    assertEquals((byte) ';', (byte) cis.read());
-    assertEquals(cis.read(), -1);
-    cis.close();
-    assertEquals(cis.getVisibilities().toString(), "[A&B]");
-  }
-}
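
These tests feed ChunkInputStream from hand-built key/value lists. Against a live filedata table the same stream is typically wrapped around a scanner positioned on one document's row, with a PeekingIterator bridging the two, and then read like any other InputStream. A hedged sketch under that assumption (table, row, and buffer size are illustrative):

    import java.io.ByteArrayOutputStream;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.util.PeekingIterator;
    import org.apache.accumulo.examples.simple.filedata.ChunkInputStream;

    public class ChunkReadSketch {
      // Streams the chunk data of a single document row out of a filedata table.
      static byte[] readDocument(Connector conn, String table, String row, Authorizations auths) throws Exception {
        Scanner scanner = conn.createScanner(table, auths);
        scanner.setRange(new Range(row));
        PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scanner.iterator());

        ChunkInputStream cis = new ChunkInputStream();
        cis.setSource(pi);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[4096];
        int read;
        while ((read = cis.read(buf)) != -1) {
          out.write(buf, 0, read);
        }
        cis.close();
        return out.toByteArray();
      }
    }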

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java b/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
deleted file mode 100644
index e93331a..0000000
--- a/examples/simple/src/test/java/org/apache/accumulo/examples/simple/filedata/KeyUtilTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.io.Text;
-
-public class KeyUtilTest extends TestCase {
-  public static void checkSeps(String... s) {
-    Text t = KeyUtil.buildNullSepText(s);
-    String[] rets = KeyUtil.splitNullSepText(t);
-
-    int length = 0;
-    for (String str : s)
-      length += str.length();
-    assertEquals(t.getLength(), length + s.length - 1);
-    assertEquals(rets.length, s.length);
-    for (int i = 0; i < s.length; i++)
-      assertEquals(s[i], rets[i]);
-  }
-
-  public void testNullSep() {
-    checkSeps("abc", "d", "", "efgh");
-    checkSeps("ab", "");
-    checkSeps("abcde");
-    checkSeps("");
-    checkSeps("", "");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/examples/simple/src/test/resources/log4j.properties b/examples/simple/src/test/resources/log4j.properties
deleted file mode 100644
index 133a28c..0000000
--- a/examples/simple/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.rootLogger=INFO, CA
-log4j.appender.CA=org.apache.log4j.ConsoleAppender
-log4j.appender.CA.layout=org.apache.log4j.PatternLayout
-log4j.appender.CA.layout.ConversionPattern=[%t} %-5p %c %x - %m%n
-
-log4j.logger.org.apache.hadoop.mapred=ERROR
-log4j.logger.org.apache.hadoop.util.ProcessTree=ERROR
-log4j.logger.org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter=ERROR
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6280565..9128dd0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -83,7 +83,6 @@
     <module>assemble</module>
     <module>core</module>
     <module>docs</module>
-    <module>examples/simple</module>
     <module>fate</module>
     <module>iterator-test-harness</module>
     <module>maven-plugin</module>
@@ -269,11 +268,6 @@
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
-        <artifactId>accumulo-examples-simple</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-fate</artifactId>
         <version>${project.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/proxy/examples/python/TestNamespace.py
----------------------------------------------------------------------
diff --git a/proxy/examples/python/TestNamespace.py b/proxy/examples/python/TestNamespace.py
index e7d2377..1e3db5e 100644
--- a/proxy/examples/python/TestNamespace.py
+++ b/proxy/examples/python/TestNamespace.py
@@ -118,13 +118,13 @@ def main():
 
     print 'adding max mutation size namespace constraint'
     constraintid = client.addNamespaceConstraint(login, 'testing',
-                                                 'org.apache.accumulo.examples.simple.constraints.MaxMutationSize')
+                                                 'org.apache.accumulo.test.constraints.MaxMutationSize')
 
     print 'make sure constraint was added'
     constraints = client.listNamespaceConstraints(login, 'testing')
     found = False
     for name, cid in constraints.iteritems():
-        if cid == constraintid and name == 'org.apache.accumulo.examples.simple.constraints.MaxMutationSize':
+        if cid == constraintid and name == 'org.apache.accumulo.test.constraints.MaxMutationSize':
             found = True
             break
     assert found
@@ -136,7 +136,7 @@ def main():
     constraints = client.listNamespaceConstraints(login, 'testing')
     found = False
     for name, cid in constraints.iteritems():
-        if cid == constraintid and name == 'org.apache.accumulo.examples.simple.constraints.MaxMutationSize':
+        if cid == constraintid and name == 'org.apache.accumulo.test.constraints.MaxMutationSize':
             found = True
             break
     assert not found
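
The proxy test above now exercises the relocated org.apache.accumulo.test.constraints.MaxMutationSize class through the Thrift proxy. The same add/list/remove cycle done directly through the Java client API looks roughly like this (namespace name taken from the test; error handling elided):

    import org.apache.accumulo.core.client.Connector;

    public class NamespaceConstraintSketch {
      static void addAndRemove(Connector conn) throws Exception {
        String clazz = "org.apache.accumulo.test.constraints.MaxMutationSize";
        int id = conn.namespaceOperations().addConstraint("testing", clazz);
        // listConstraints returns a map of constraint class name -> constraint id
        System.out.println(conn.namespaceOperations().listConstraints("testing").containsKey(clazz));
        conn.namespaceOperations().removeConstraint("testing", id);
      }
    }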

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/proxy/pom.xml
----------------------------------------------------------------------
diff --git a/proxy/pom.xml b/proxy/pom.xml
index 6bb4eb2..253c5e8 100644
--- a/proxy/pom.xml
+++ b/proxy/pom.xml
@@ -77,11 +77,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-examples-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.easymock</groupId>
       <artifactId>easymock</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/pom.xml
----------------------------------------------------------------------
diff --git a/test/pom.xml b/test/pom.xml
index 0bad02f..18a28dc 100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@ -84,10 +84,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-examples-simple</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-fate</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
index f0b46b5..7df5885 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -87,7 +87,7 @@ import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
 import org.apache.accumulo.core.util.FastFormat;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
+import org.apache.accumulo.test.constraints.AlphaNumKeyConstraint;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.test.functional.BadIterator;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
index 4d374eb..18c275a 100644
--- a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
@@ -75,7 +75,7 @@ import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.test.constraints.NumericValueConstraint;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.test.categories.MiniClusterOnlyTests;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
index c327c28..eae5ca9 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -1692,7 +1692,7 @@ public class ShellServerIT extends SharedMiniClusterBase {
     ts.exec("tables", true, "thing2.thingy", false);
 
     // put constraints on a namespace
-    ts.exec("constraint -ns thing3 -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", true);
+    ts.exec("constraint -ns thing3 -a org.apache.accumulo.test.constraints.NumericValueConstraint", true);
     ts.exec("createtable thing3.constrained", true);
     ts.exec("table thing3.constrained", true);
     ts.exec("constraint -d 1");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraint.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraint.java b/test/src/main/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraint.java
new file mode 100644
index 0000000..14fa82d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/constraints/AlphaNumKeyConstraint.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.accumulo.core.constraints.Constraint;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.Mutation;
+
+/**
+ * This class is an accumulo constraint that ensures all fields of a key are alpha numeric.
+ */
+public class AlphaNumKeyConstraint implements Constraint {
+
+  static final short NON_ALPHA_NUM_ROW = 1;
+  static final short NON_ALPHA_NUM_COLF = 2;
+  static final short NON_ALPHA_NUM_COLQ = 3;
+
+  static final String ROW_VIOLATION_MESSAGE = "Row was not alpha numeric";
+  static final String COLF_VIOLATION_MESSAGE = "Column family was not alpha numeric";
+  static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alpha numeric";
+
+  private boolean isAlphaNum(byte bytes[]) {
+    for (byte b : bytes) {
+      boolean ok = ((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9'));
+      if (!ok)
+        return false;
+    }
+
+    return true;
+  }
+
+  private Set<Short> addViolation(Set<Short> violations, short violation) {
+    if (violations == null) {
+      violations = new LinkedHashSet<>();
+      violations.add(violation);
+    } else if (!violations.contains(violation)) {
+      violations.add(violation);
+    }
+    return violations;
+  }
+
+  @Override
+  public List<Short> check(Environment env, Mutation mutation) {
+    Set<Short> violations = null;
+
+    if (!isAlphaNum(mutation.getRow()))
+      violations = addViolation(violations, NON_ALPHA_NUM_ROW);
+
+    Collection<ColumnUpdate> updates = mutation.getUpdates();
+    for (ColumnUpdate columnUpdate : updates) {
+      if (!isAlphaNum(columnUpdate.getColumnFamily()))
+        violations = addViolation(violations, NON_ALPHA_NUM_COLF);
+
+      if (!isAlphaNum(columnUpdate.getColumnQualifier()))
+        violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
+    }
+
+    return null == violations ? null : new ArrayList<>(violations);
+  }
+
+  @Override
+  public String getViolationDescription(short violationCode) {
+
+    switch (violationCode) {
+      case NON_ALPHA_NUM_ROW:
+        return ROW_VIOLATION_MESSAGE;
+      case NON_ALPHA_NUM_COLF:
+        return COLF_VIOLATION_MESSAGE;
+      case NON_ALPHA_NUM_COLQ:
+        return COLQ_VIOLATION_MESSAGE;
+    }
+
+    return null;
+  }
+
+}
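
With the constraint now living in the test module, it is attached to a table through TableOperations, much as the shell line in ShellServerIT does for a namespace; a minimal hedged sketch (table name is illustrative):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.test.constraints.AlphaNumKeyConstraint;

    public class AttachAlphaNumConstraintSketch {
      // After this call, any mutation whose row, column family, or qualifier
      // contains a non-alphanumeric byte is rejected with the violations above.
      static void attach(Connector conn, String table) throws Exception {
        conn.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
      }
    }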

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/constraints/MaxMutationSize.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/constraints/MaxMutationSize.java b/test/src/main/java/org/apache/accumulo/test/constraints/MaxMutationSize.java
new file mode 100644
index 0000000..1b68e96
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/constraints/MaxMutationSize.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.accumulo.core.constraints.Constraint;
+import org.apache.accumulo.core.data.Mutation;
+
+/**
+ * Ensure that mutations are a reasonable size: we must be able to fit several in memory at a time.
+ *
+ */
+public class MaxMutationSize implements Constraint {
+  static final long MAX_SIZE = Runtime.getRuntime().maxMemory() >> 8;
+  static final List<Short> empty = Collections.emptyList();
+  static final List<Short> violations = Collections.singletonList(Short.valueOf((short) 0));
+
+  @Override
+  public String getViolationDescription(short violationCode) {
+    return String.format("mutation exceeded maximum size of %d", MAX_SIZE);
+  }
+
+  @Override
+  public List<Short> check(Environment env, Mutation mutation) {
+    if (mutation.estimatedMemoryUsed() < MAX_SIZE)
+      return empty;
+    return violations;
+  }
+}
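
MaxMutationSize compares Mutation.estimatedMemoryUsed() against roughly 1/256 of the JVM's maximum heap, so ordinary small mutations pass and only outsized ones produce the single violation code. That behavior can be checked directly, in the same style as the constraint tests in this commit (a hedged sketch; passing null for the environment as those tests do):

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.test.constraints.MaxMutationSize;
    import org.apache.hadoop.io.Text;

    public class MaxMutationSizeSketch {
      public static void main(String[] args) {
        Mutation small = new Mutation(new Text("row"));
        small.put(new Text("cf"), new Text("cq"), new Value("v".getBytes()));
        // An empty violation list means the mutation is within the size limit.
        System.out.println(new MaxMutationSize().check(null, small));
      }
    }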

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/constraints/NumericValueConstraint.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/constraints/NumericValueConstraint.java b/test/src/main/java/org/apache/accumulo/test/constraints/NumericValueConstraint.java
new file mode 100644
index 0000000..0d1ae86
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/constraints/NumericValueConstraint.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.constraints;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.accumulo.core.constraints.Constraint;
+import org.apache.accumulo.core.data.ColumnUpdate;
+import org.apache.accumulo.core.data.Mutation;
+
+/**
+ * This class is an accumulo constraint that ensures values are numeric strings.
+ */
+public class NumericValueConstraint implements Constraint {
+
+  static final short NON_NUMERIC_VALUE = 1;
+  static final String VIOLATION_MESSAGE = "Value is not numeric";
+
+  private static final List<Short> VIOLATION_LIST = Collections.unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
+
+  private boolean isNumeric(byte bytes[]) {
+    for (byte b : bytes) {
+      boolean ok = (b >= '0' && b <= '9');
+      if (!ok)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public List<Short> check(Environment env, Mutation mutation) {
+    Collection<ColumnUpdate> updates = mutation.getUpdates();
+
+    for (ColumnUpdate columnUpdate : updates) {
+      if (!isNumeric(columnUpdate.getValue()))
+        return VIOLATION_LIST;
+    }
+
+    return null;
+  }
+
+  @Override
+  public String getViolationDescription(short violationCode) {
+
+    switch (violationCode) {
+      case NON_NUMERIC_VALUE:
+        return "Value is not numeric";
+    }
+
+    return null;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/examples/simple/dirlist/CountIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/examples/simple/dirlist/CountIT.java b/test/src/main/java/org/apache/accumulo/test/examples/simple/dirlist/CountIT.java
deleted file mode 100644
index 93708a6..0000000
--- a/test/src/main/java/org/apache/accumulo/test/examples/simple/dirlist/CountIT.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.examples.simple.dirlist;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.util.ArrayList;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.examples.simple.dirlist.FileCount;
-import org.apache.accumulo.examples.simple.dirlist.FileCount.Opts;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.io.Text;
-import org.junit.Before;
-import org.junit.Test;
-
-public class CountIT extends ConfigurableMacBase {
-
-  private Connector conn;
-  private String tableName;
-
-  @Before
-  public void setupInstance() throws Exception {
-    tableName = getUniqueNames(1)[0];
-    conn = getConnector();
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    ColumnVisibility cv = new ColumnVisibility();
-    // / has 1 dir
-    // /local has 2 dirs 1 file
-    // /local/user1 has 2 files
-    bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
-    bw.close();
-  }
-
-  @Test
-  public void test() throws Exception {
-    Scanner scanner = conn.createScanner(tableName, new Authorizations());
-    scanner.fetchColumn(new Text("dir"), new Text("counts"));
-    assertFalse(scanner.iterator().hasNext());
-
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.instance = conn.getInstance().getInstanceName();
-    opts.zookeepers = conn.getInstance().getZooKeepers();
-    opts.setTableName(tableName);
-    opts.setPrincipal(conn.whoami());
-    opts.setPassword(new Opts.Password(ROOT_PASSWORD));
-    FileCount fc = new FileCount(opts, scanOpts, bwOpts);
-    fc.run();
-
-    ArrayList<Pair<String,String>> expected = new ArrayList<>();
-    expected.add(new Pair<>(QueryUtil.getRow("").toString(), "1,0,3,3"));
-    expected.add(new Pair<>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
-    expected.add(new Pair<>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
-    expected.add(new Pair<>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
-
-    int i = 0;
-    for (Entry<Key,Value> e : scanner) {
-      assertEquals(e.getKey().getRow().toString(), expected.get(i).getFirst());
-      assertEquals(e.getValue().toString(), expected.get(i).getSecond());
-      i++;
-    }
-    assertEquals(i, expected.size());
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputFormatIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputFormatIT.java
deleted file mode 100644
index cb53ec0..0000000
--- a/test/src/main/java/org/apache/accumulo/test/examples/simple/filedata/ChunkInputFormatIT.java
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.examples.simple.filedata;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.examples.simple.filedata.ChunkInputFormat;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Multimap;
-
-public class ChunkInputFormatIT extends AccumuloClusterHarness {
-
-  // track errors in the map reduce job; jobs insert a dummy error for the map and cleanup tasks (to ensure test correctness),
-  // so error tests should check to see if there is at least one error (could be more depending on the test) rather than zero
-  private static Multimap<String,AssertionError> assertionErrors = ArrayListMultimap.create();
-
-  private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
-
-  private static List<Entry<Key,Value>> data;
-  private static List<Entry<Key,Value>> baddata;
-
-  private Connector conn;
-  private String tableName;
-
-  @Before
-  public void setupInstance() throws Exception {
-    conn = getConnector();
-    tableName = getUniqueNames(1)[0];
-    conn.securityOperations().changeUserAuthorizations(conn.whoami(), AUTHS);
-  }
-
-  @BeforeClass
-  public static void setupClass() {
-    System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
-
-    data = new ArrayList<>();
-    ChunkInputStreamIT.addData(data, "a", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamIT.addData(data, "a", "refs", "ida\0name", "A&B", "name");
-    ChunkInputStreamIT.addData(data, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
-    ChunkInputStreamIT.addData(data, "a", "~chunk", 100, 1, "A&B", "");
-    ChunkInputStreamIT.addData(data, "b", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamIT.addData(data, "b", "refs", "ida\0name", "A&B", "name");
-    ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 0, "A&B", "qwertyuiop");
-    ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 0, "B&C", "qwertyuiop");
-    ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "A&B", "");
-    ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "B&C", "");
-    ChunkInputStreamIT.addData(data, "b", "~chunk", 100, 1, "D", "");
-    baddata = new ArrayList<>();
-    ChunkInputStreamIT.addData(baddata, "c", "refs", "ida\0ext", "A&B", "ext");
-    ChunkInputStreamIT.addData(baddata, "c", "refs", "ida\0name", "A&B", "name");
-  }
-
-  public static void entryEquals(Entry<Key,Value> e1, Entry<Key,Value> e2) {
-    assertEquals(e1.getKey(), e2.getKey());
-    assertEquals(e1.getValue(), e2.getValue());
-  }
-
-  public static class CIFTester extends Configured implements Tool {
-    public static class TestMapper extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      int count = 0;
-
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        String table = context.getConfiguration().get("MRTester_tableName");
-        assertNotNull(table);
-
-        byte[] b = new byte[20];
-        int read;
-        try {
-          switch (count) {
-            case 0:
-              assertEquals(key.size(), 2);
-              entryEquals(key.get(0), data.get(0));
-              entryEquals(key.get(1), data.get(1));
-              assertEquals(read = value.read(b), 8);
-              assertEquals(new String(b, 0, read), "asdfjkl;");
-              assertEquals(read = value.read(b), -1);
-              break;
-            case 1:
-              assertEquals(key.size(), 2);
-              entryEquals(key.get(0), data.get(4));
-              entryEquals(key.get(1), data.get(5));
-              assertEquals(read = value.read(b), 10);
-              assertEquals(new String(b, 0, read), "qwertyuiop");
-              assertEquals(read = value.read(b), -1);
-              break;
-            default:
-              fail();
-          }
-        } catch (AssertionError e) {
-          assertionErrors.put(table, e);
-        } finally {
-          value.close();
-        }
-        count++;
-      }
-
-      @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
-        String table = context.getConfiguration().get("MRTester_tableName");
-        assertNotNull(table);
-
-        try {
-          assertEquals(2, count);
-        } catch (AssertionError e) {
-          assertionErrors.put(table, e);
-        }
-      }
-    }
-
-    public static class TestNoClose extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      int count = 0;
-
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        String table = context.getConfiguration().get("MRTester_tableName");
-        assertNotNull(table);
-
-        byte[] b = new byte[5];
-        int read;
-        try {
-          switch (count) {
-            case 0:
-              assertEquals(read = value.read(b), 5);
-              assertEquals(new String(b, 0, read), "asdfj");
-              break;
-            default:
-              fail();
-          }
-        } catch (AssertionError e) {
-          assertionErrors.put(table, e);
-        }
-        count++;
-        try {
-          context.nextKeyValue();
-          fail();
-        } catch (IOException ioe) {
-          assertionErrors.put(table + "_map_ioexception", new AssertionError(toString(), ioe));
-        }
-      }
-    }
-
-    public static class TestBadData extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
-      @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
-        String table = context.getConfiguration().get("MRTester_tableName");
-        assertNotNull(table);
-
-        byte[] b = new byte[20];
-        try {
-          assertEquals(key.size(), 2);
-          entryEquals(key.get(0), baddata.get(0));
-          entryEquals(key.get(1), baddata.get(1));
-        } catch (AssertionError e) {
-          assertionErrors.put(table, e);
-        }
-        try {
-          assertFalse(value.read(b) > 0);
-          try {
-            fail();
-          } catch (AssertionError e) {
-            assertionErrors.put(table, e);
-          }
-        } catch (Exception e) {
-          // expected, ignore
-        }
-        try {
-          value.close();
-          try {
-            fail();
-          } catch (AssertionError e) {
-            assertionErrors.put(table, e);
-          }
-        } catch (Exception e) {
-          // expected, ignore
-        }
-      }
-    }
-
-    @Override
-    public int run(String[] args) throws Exception {
-      if (args.length != 2) {
-        throw new IllegalArgumentException("Usage : " + CIFTester.class.getName() + " <table> <mapperClass>");
-      }
-
-      String table = args[0];
-      assertionErrors.put(table, new AssertionError("Dummy"));
-      assertionErrors.put(table + "_map_ioexception", new AssertionError("Dummy_ioexception"));
-      getConf().set("MRTester_tableName", table);
-
-      Job job = Job.getInstance(getConf());
-      job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(ChunkInputFormat.class);
-
-      ChunkInputFormat.setZooKeeperInstance(job, getCluster().getClientConfig());
-      ChunkInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
-      ChunkInputFormat.setInputTableName(job, table);
-      ChunkInputFormat.setScanAuthorizations(job, AUTHS);
-
-      @SuppressWarnings("unchecked")
-      Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class.forName(args[1]);
-      job.setMapperClass(forName);
-      job.setMapOutputKeyClass(Key.class);
-      job.setMapOutputValueClass(Value.class);
-      job.setOutputFormatClass(NullOutputFormat.class);
-
-      job.setNumReduceTasks(0);
-
-      job.waitForCompletion(true);
-
-      return job.isSuccessful() ? 0 : 1;
-    }
-
-    public static int main(String... args) throws Exception {
-      Configuration conf = new Configuration();
-      conf.set("mapreduce.framework.name", "local");
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
-      return ToolRunner.run(conf, new CIFTester(), args);
-    }
-  }
-
-  @Test
-  public void test() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
-    assertEquals(1, assertionErrors.get(tableName).size());
-  }
-
-  @Test
-  public void testErrorOnNextWithoutClose() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-
-    for (Entry<Key,Value> e : data) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(1, CIFTester.main(tableName, CIFTester.TestNoClose.class.getName()));
-    assertEquals(1, assertionErrors.get(tableName).size());
-    // this should actually exist, in addition to the dummy entry
-    assertEquals(2, assertionErrors.get(tableName + "_map_ioexception").size());
-  }
-
-  @Test
-  public void testInfoWithoutChunks() throws Exception {
-    conn.tableOperations().create(tableName);
-    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-    for (Entry<Key,Value> e : baddata) {
-      Key k = e.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
-      bw.addMutation(m);
-    }
-    bw.close();
-
-    assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
-    assertEquals(1, assertionErrors.get(tableName).size());
-  }
-}


[5/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
deleted file mode 100644
index e762e7d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import static org.apache.accumulo.examples.simple.client.RandomBatchWriter.abs;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for reading random batches of data from Accumulo. See docs/examples/README.batch for instructions.
- */
-public class RandomBatchScanner {
-  private static final Logger log = LoggerFactory.getLogger(RandomBatchScanner.class);
-
-  /**
-   * Generate a number of ranges, each covering a single random row.
-   *
-   * @param num
-   *          the number of ranges to generate
-   * @param min
-   *          the minimum row that will be generated
-   * @param max
-   *          the maximum row that will be generated
-   * @param r
-   *          a random number generator
-   * @param ranges
-   *          a set in which to store the generated ranges
-   * @param expectedRows
-   *          a map in which to store the rows covered by the ranges (initially mapped to false)
-   */
-  static void generateRandomQueries(int num, long min, long max, Random r, HashSet<Range> ranges, HashMap<Text,Boolean> expectedRows) {
-    log.info(String.format("Generating %,d random queries...", num));
-    while (ranges.size() < num) {
-      long rowid = (abs(r.nextLong()) % (max - min)) + min;
-
-      Text row1 = new Text(String.format("row_%010d", rowid));
-
-      Range range = new Range(new Text(row1));
-      ranges.add(range);
-      expectedRows.put(row1, false);
-    }
-
-    log.info("finished");
-  }
-
-  /**
-   * Prints a count of the number of rows mapped to false.
-   *
-   * @return boolean indicating "were all the rows found?"
-   */
-  private static boolean checkAllRowsFound(HashMap<Text,Boolean> expectedRows) {
-    int count = 0;
-    boolean allFound = true;
-    for (Entry<Text,Boolean> entry : expectedRows.entrySet())
-      if (!entry.getValue())
-        count++;
-
-    if (count > 0) {
-      log.warn("Did not find " + count + " rows");
-      allFound = false;
-    }
-    return allFound;
-  }
-
-  /**
-   * Generates a number of random queries, verifies that the key/value pairs returned were in the queried ranges and that the values were generated by
-   * {@link RandomBatchWriter#createValue(long, int)}. Prints information about the results.
-   *
-   * @param num
-   *          the number of queries to generate
-   * @param min
-   *          the min row to query
-   * @param max
-   *          the max row to query
-   * @param evs
-   *          the expected size of the values
-   * @param r
-   *          a random number generator
-   * @param tsbr
-   *          a batch scanner
-   * @return boolean indicating "did the queries go fine?"
-   */
-  static boolean doRandomQueries(int num, long min, long max, int evs, Random r, BatchScanner tsbr) {
-
-    HashSet<Range> ranges = new HashSet<>(num);
-    HashMap<Text,Boolean> expectedRows = new HashMap<>();
-
-    generateRandomQueries(num, min, max, r, ranges, expectedRows);
-
-    tsbr.setRanges(ranges);
-
-    CountingVerifyingReceiver receiver = new CountingVerifyingReceiver(expectedRows, evs);
-
-    long t1 = System.currentTimeMillis();
-
-    for (Entry<Key,Value> entry : tsbr) {
-      receiver.receive(entry.getKey(), entry.getValue());
-    }
-
-    long t2 = System.currentTimeMillis();
-
-    log.info(String.format("%6.2f lookups/sec %6.2f secs%n", num / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0)));
-    log.info(String.format("num results : %,d%n", receiver.count));
-
-    return checkAllRowsFound(expectedRows);
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--min", description = "minimum row that will be generated")
-    long min = 0;
-    @Parameter(names = "--max", description = "maximum row that will be generated")
-    long max = 0;
-    @Parameter(names = "--num", required = true, description = "number of ranges to generate")
-    int num = 0;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int size = 0;
-    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
-    Long seed = null;
-  }
-
-  /**
-   * Scans a specified number of entries in Accumulo using a {@link BatchScanner}. Completes scans twice to compare times for a fresh query with those for
-   * a repeated query which has cached metadata and connections already established.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    BatchScannerOpts bsOpts = new BatchScannerOpts();
-    opts.parseArgs(RandomBatchScanner.class.getName(), args, bsOpts);
-
-    Connector connector = opts.getConnector();
-    BatchScanner batchReader = connector.createBatchScanner(opts.getTableName(), opts.auths, bsOpts.scanThreads);
-    batchReader.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-
-    Random r;
-    if (opts.seed == null)
-      r = new Random();
-    else
-      r = new Random(opts.seed);
-
-    // do one cold
-    boolean status = doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
-
-    System.gc();
-    System.gc();
-    System.gc();
-
-    if (opts.seed == null)
-      r = new Random();
-    else
-      r = new Random(opts.seed);
-
-    // do one hot (connections already established, metadata table cached)
-    status = status && doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
-
-    batchReader.close();
-    if (!status) {
-      System.exit(1);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
deleted file mode 100644
index 51aee8f..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.TabletId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for writing random data to Accumulo. See docs/examples/README.batch for instructions.
- *
- * The rows of the entries will be randomly generated numbers between a specified min and max (prefixed by "row_"). The column families will be "foo" and column
- * qualifiers will be "1". The values will be random byte arrays of a specified size.
- */
-public class RandomBatchWriter {
-
-  /**
-   * Creates a random byte array of specified size using the specified seed.
-   *
-   * @param rowid
-   *          the seed to use for the random number generator
-   * @param dataSize
-   *          the size of the array
-   * @return a random byte array
-   */
-  public static byte[] createValue(long rowid, int dataSize) {
-    Random r = new Random(rowid);
-    byte value[] = new byte[dataSize];
-
-    r.nextBytes(value);
-
-    // transform to printable chars
-    for (int j = 0; j < value.length; j++) {
-      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
-    }
-
-    return value;
-  }
-
-  /**
-   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
-   *
-   * @param rowid
-   *          the row of the mutation
-   * @param dataSize
-   *          the size of the random value
-   * @param visibility
-   *          the visibility of the entry to insert
-   * @return a mutation
-   */
-  public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
-    Text row = new Text(String.format("row_%010d", rowid));
-
-    Mutation m = new Mutation(row);
-
-    // create a random value that is a function of the
-    // row id for verification purposes
-    byte value[] = createValue(rowid, dataSize);
-
-    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
-
-    return m;
-  }
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--num", required = true)
-    int num = 0;
-    @Parameter(names = "--min")
-    long min = 0;
-    @Parameter(names = "--max")
-    long max = Long.MAX_VALUE;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int size = 0;
-    @Parameter(names = "--vis", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility("");
-    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
-    Long seed = null;
-  }
-
-  public static long abs(long l) {
-    l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
-    if (l < 0)
-      return 0;
-    return l;
-  }
-
-  /**
-   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
-    if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long in a way that doesn't trigger FindBugs
-      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. "
-          + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.", opts.num, opts.min, opts.max,
-          (opts.max - opts.min)));
-      System.exit(1);
-    }
-    Random r;
-    if (opts.seed == null)
-      r = new Random();
-    else {
-      r = new Random(opts.seed);
-    }
-    Connector connector = opts.getConnector();
-    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    // reuse the ColumnVisibility object to improve performance
-    ColumnVisibility cv = opts.visibility;
-
-    // Generate num unique row ids in the given range
-    HashSet<Long> rowids = new HashSet<>(opts.num);
-    while (rowids.size() < opts.num) {
-      rowids.add((abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
-    }
-    for (long rowid : rowids) {
-      Mutation m = createMutation(rowid, opts.size, cv);
-      bw.addMutation(m);
-    }
-
-    try {
-      bw.close();
-    } catch (MutationsRejectedException e) {
-      if (e.getSecurityErrorCodes().size() > 0) {
-        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<>();
-        for (Entry<TabletId,Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
-          String tableId = ke.getKey().getTableId().toString();
-          Set<SecurityErrorCode> secCodes = tables.get(tableId);
-          if (secCodes == null) {
-            secCodes = new HashSet<>();
-            tables.put(tableId, secCodes);
-          }
-          secCodes.addAll(ke.getValue());
-        }
-        System.err.println("ERROR : Not authorized to write to tables : " + tables);
-      }
-
-      if (e.getConstraintViolationSummaries().size() > 0) {
-        System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
-      }
-      System.exit(1);
-    }
-  }
-}

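One detail worth noting about the writer above: createValue(rowid, dataSize) seeds its Random with the row id, so the value for any row can be regenerated later, which is what lets RandomBatchScanner verify the data it reads back. A small sketch of that check (the class and method names here are illustrative only):

    import java.util.Arrays;

    import org.apache.accumulo.examples.simple.client.RandomBatchWriter;

    public class ValueCheck {
      // recompute the value RandomBatchWriter would have written for this row id and compare
      static boolean matches(long rowid, int dataSize, byte[] scannedValue) {
        byte[] expected = RandomBatchWriter.createValue(rowid, dataSize);
        return Arrays.equals(expected, scannedValue);
      }
    }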
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
deleted file mode 100644
index 44d4b6f..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.DurabilityImpl;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.ByteArraySet;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.IStringConverter;
-import com.beust.jcommander.Parameter;
-
-public class ReadWriteExample {
-  // defaults
-  private static final String DEFAULT_AUTHS = "LEVEL1,GROUP1";
-  private static final String DEFAULT_TABLE_NAME = "test";
-
-  private Connector conn;
-
-  static class DurabilityConverter implements IStringConverter<Durability> {
-    @Override
-    public Durability convert(String value) {
-      return DurabilityImpl.fromString(value);
-    }
-  }
-
-  static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table before doing anything")
-    boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table when finished")
-    boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries before any deletes")
-    boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any creates/deletes")
-    boolean readEntries = false;
-    @Parameter(names = {"-d", "--delete"}, description = "delete entries after any creates")
-    boolean deleteEntries = false;
-    @Parameter(names = {"--durability"}, description = "durability used for writes (none, log, flush or sync)", converter = DurabilityConverter.class)
-    Durability durability = Durability.DEFAULT;
-
-    public Opts() {
-      super(DEFAULT_TABLE_NAME);
-      auths = new Authorizations(DEFAULT_AUTHS.split(","));
-    }
-  }
-
-  // hidden constructor
-  private ReadWriteExample() {}
-
-  private void execute(Opts opts, ScannerOpts scanOpts) throws Exception {
-    conn = opts.getConnector();
-
-    // add the authorizations to the user
-    Authorizations userAuthorizations = conn.securityOperations().getUserAuthorizations(opts.getPrincipal());
-    ByteArraySet auths = new ByteArraySet(userAuthorizations.getAuthorizations());
-    auths.addAll(opts.auths.getAuthorizations());
-    if (!auths.isEmpty())
-      conn.securityOperations().changeUserAuthorizations(opts.getPrincipal(), new Authorizations(auths));
-
-    // create table
-    if (opts.createtable) {
-      SortedSet<Text> partitionKeys = new TreeSet<>();
-      for (int i = Byte.MIN_VALUE; i < Byte.MAX_VALUE; i++)
-        partitionKeys.add(new Text(new byte[] {(byte) i}));
-      conn.tableOperations().create(opts.getTableName());
-      conn.tableOperations().addSplits(opts.getTableName(), partitionKeys);
-    }
-
-    // send mutations
-    createEntries(opts);
-
-    // read entries
-    if (opts.readEntries) {
-      // Note that the user needs to have been granted the specified scan authorizations
-      // by an administrator first
-      Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
-      scanner.setBatchSize(scanOpts.scanBatchSize);
-      for (Entry<Key,Value> entry : scanner)
-        System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
-    }
-
-    // delete table
-    if (opts.deletetable)
-      conn.tableOperations().delete(opts.getTableName());
-  }
-
-  private void createEntries(Opts opts) throws Exception {
-    if (opts.createEntries || opts.deleteEntries) {
-      BatchWriterConfig cfg = new BatchWriterConfig();
-      cfg.setDurability(opts.durability);
-      BatchWriter writer = conn.createBatchWriter(opts.getTableName(), cfg);
-      ColumnVisibility cv = new ColumnVisibility(opts.auths.toString().replace(',', '|'));
-
-      Text cf = new Text("datatypes");
-      Text cq = new Text("xml");
-      byte[] row = {'h', 'e', 'l', 'l', 'o', '\0'};
-      byte[] value = {'w', 'o', 'r', 'l', 'd', '\0'};
-
-      for (int i = 0; i < 10; i++) {
-        row[row.length - 1] = (byte) i;
-        Mutation m = new Mutation(new Text(row));
-        if (opts.deleteEntries) {
-          m.putDelete(cf, cq, cv);
-        }
-        if (opts.createEntries) {
-          value[value.length - 1] = (byte) i;
-          m.put(cf, cq, cv, new Value(value));
-        }
-        writer.addMutation(m);
-      }
-      writer.close();
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    ReadWriteExample rwe = new ReadWriteExample();
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    opts.parseArgs(ReadWriteExample.class.getName(), args, scanOpts);
-    rwe.execute(opts, scanOpts);
-  }
-}

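A note on the visibility handling above: the entries are written with a ColumnVisibility built by joining the user's authorizations with '|' (e.g. "LEVEL1|GROUP1"), so holding any one of those labels is enough to read them back. A short sketch of a scan with a single label, assuming the default authorizations; the class and method names are illustrative:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    public class VisibilityScanSketch {
      // LEVEL1 alone satisfies the "LEVEL1|GROUP1" visibility expression on the entries
      static void scanWithOneLabel(Connector conn, String table) throws Exception {
        Scanner scanner = conn.createScanner(table, new Authorizations("LEVEL1"));
        for (Entry<Key,Value> entry : scanner)
          System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
    }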
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
deleted file mode 100644
index 007619d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A demonstration of reading entire rows and deleting entire rows.
- */
-public class RowOperations {
-
-  private static final Logger log = LoggerFactory.getLogger(RowOperations.class);
-
-  private static Connector connector;
-  private static String tableName = "example";
-  private static BatchWriter bw;
-
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
-      MutationsRejectedException {
-
-    ClientOpts opts = new ClientOpts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(RowOperations.class.getName(), args, scanOpts, bwOpts);
-
-    // First the setup work
-    connector = opts.getConnector();
-
-    // let's create an example table
-    connector.tableOperations().create(tableName);
-
-    // let's create 3 rows of information
-    Text row1 = new Text("row1");
-    Text row2 = new Text("row2");
-    Text row3 = new Text("row3");
-
-    // Which means 3 different mutations
-    Mutation mut1 = new Mutation(row1);
-    Mutation mut2 = new Mutation(row2);
-    Mutation mut3 = new Mutation(row3);
-
-    // And we'll put 4 columns in each row
-    Text col1 = new Text("1");
-    Text col2 = new Text("2");
-    Text col3 = new Text("3");
-    Text col4 = new Text("4");
-
-    // Now we'll add them to the mutations
-    mut1.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    mut2.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    mut3.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    // Now we'll make a Batch Writer
-    bw = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
-
-    // And add the mutations
-    bw.addMutation(mut1);
-    bw.addMutation(mut2);
-    bw.addMutation(mut3);
-
-    // Force a send
-    bw.flush();
-
-    // Now let's look at the rows
-    Scanner rowThree = getRow(scanOpts, new Text("row3"));
-    Scanner rowTwo = getRow(scanOpts, new Text("row2"));
-    Scanner rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is everything");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Now let's delete rowTwo with the iterator
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    deleteRow(rowTwo);
-
-    // Now let's look at the rows again
-    rowThree = getRow(scanOpts, new Text("row3"));
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is row1 and row3");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Should only see the two rows
-    // Now let's delete rowOne without passing in the iterator
-
-    deleteRow(scanOpts, row1);
-
-    // Now let's look at the rows one last time
-    rowThree = getRow(scanOpts, new Text("row3"));
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is just row3");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Should only see rowThree
-
-    // Always close your batchwriter
-
-    bw.close();
-
-    // and let's clean up our mess
-    connector.tableOperations().delete(tableName);
-
-    // fin~
-
-  }
-
-  /**
-   * Deletes a row given a text object
-   */
-  private static void deleteRow(ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    deleteRow(getRow(scanOpts, row));
-  }
-
-  /**
-   * Deletes a row, given a Scanner of JUST that row
-   */
-  private static void deleteRow(Scanner scanner) throws MutationsRejectedException {
-    Mutation deleter = null;
-    // iterate through the keys
-    for (Entry<Key,Value> entry : scanner) {
-      // create a mutation for the row
-      if (deleter == null)
-        deleter = new Mutation(entry.getKey().getRow());
-      // putDelete adds the key with the delete flag set to true
-      deleter.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
-    }
-    if (deleter != null) bw.addMutation(deleter); // the scanner may have returned no entries
-    bw.flush();
-  }
-
-  /**
-   * Just a generic print function given an iterator. Not necessarily just for printing a single row
-   */
-  private static void printRow(Scanner scanner) {
-    // iterates through and prints
-    for (Entry<Key,Value> entry : scanner)
-      log.info("Key: " + entry.getKey().toString() + " Value: " + entry.getValue().toString());
-  }
-
-  /**
-   * Gets a scanner over one row
-   */
-  private static Scanner getRow(ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    // Create a scanner
-    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
-    scanner.setBatchSize(scanOpts.scanBatchSize);
-    // Say start key is the one with key of row
-    // and end key is the one that immediately follows the row
-    scanner.setRange(new Range(row));
-    return scanner;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
deleted file mode 100644
index f2bd4d7..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.ColumnVisibility;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for writing random data in sequential order to Accumulo. See docs/examples/README.batch for instructions.
- */
-public class SequentialBatchWriter {
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--start")
-    long start = 0;
-    @Parameter(names = "--num", required = true)
-    long num = 0;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int valueSize = 0;
-    @Parameter(names = "--vis", converter = VisibilityConverter.class)
-    ColumnVisibility vis = new ColumnVisibility();
-  }
-
-  /**
-   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
-   * The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
-    Connector connector = opts.getConnector();
-    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    long end = opts.start + opts.num;
-
-    for (long i = opts.start; i < end; i++) {
-      Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
-      bw.addMutation(m);
-    }
-
-    bw.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
deleted file mode 100644
index 5885094..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.tracer.TraceDump;
-import org.apache.accumulo.tracer.TraceDump.Printer;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Example of using the TraceDump class to print a formatted view of a Trace
- *
- */
-public class TraceDumpExample {
-  private static final Logger log = LoggerFactory.getLogger(TraceDumpExample.class);
-
-  static class Opts extends ClientOnDefaultTable {
-    public Opts() {
-      super("trace");
-    }
-
-    @Parameter(names = {"--traceid"}, description = "The hex string id of a given trace, for example 16cfbbd7beec4ae3")
-    public String traceId = "";
-  }
-
-  public void dump(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    if (opts.traceId.isEmpty()) {
-      throw new IllegalArgumentException("--traceid option is required");
-    }
-
-    final Connector conn = opts.getConnector();
-    final String principal = opts.getPrincipal();
-    final String table = opts.getTableName();
-    if (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
-      conn.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new RuntimeException(e);
-      }
-      while (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
-        log.info("Waiting for read permission on {} to propagate for {}", table, principal);
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new RuntimeException(e);
-        }
-      }
-    }
-    Scanner scanner = conn.createScanner(table, opts.auths);
-    scanner.setRange(new Range(new Text(opts.traceId)));
-    TraceDump.printTrace(scanner, new Printer() {
-      @Override
-      public void print(String line) {
-        System.out.println(line);
-      }
-    });
-  }
-
-  public static void main(String[] args) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-    TraceDumpExample traceDumpExample = new TraceDumpExample();
-    Opts opts = new Opts();
-    ScannerOpts scannerOpts = new ScannerOpts();
-    opts.parseArgs(TraceDumpExample.class.getName(), args, scannerOpts);
-
-    traceDumpExample.dump(opts);
-  }
-
-}

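The core of the removed TraceDumpExample is just a scan of the trace table restricted to a single trace id. A minimal sketch, reusing only the calls shown in the deleted file; the connector, authorizations, and the trace id are placeholders:

    Scanner scanner = connector.createScanner("trace", auths);
    scanner.setRange(new Range(new Text("16cfbbd7beec4ae3")));   // hex trace id to dump
    TraceDump.printTrace(scanner, new Printer() {
      @Override
      public void print(String line) {
        System.out.println(line);   // one formatted span per line
      }
    });
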
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
deleted file mode 100644
index 3ee0a27..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.examples.simple.client;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.DistributedTrace;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A simple example showing how to use the distributed tracing API in client code
- *
- */
-public class TracingExample {
-  private static final Logger log = LoggerFactory.getLogger(TracingExample.class);
-  private static final String DEFAULT_TABLE_NAME = "test";
-
-  static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table before doing anything")
-    boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table when finished")
-    boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries before any deletes")
-    boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any creates/deletes")
-    boolean readEntries = false;
-
-    public Opts() {
-      super(DEFAULT_TABLE_NAME);
-      auths = new Authorizations();
-    }
-  }
-
-  public void enableTracing(Opts opts) throws Exception {
-    DistributedTrace.enable("myHost", "myApp");
-  }
-
-  public void execute(Opts opts) throws TableNotFoundException, InterruptedException, AccumuloException, AccumuloSecurityException, TableExistsException {
-
-    if (opts.createtable) {
-      opts.getConnector().tableOperations().create(opts.getTableName());
-    }
-
-    if (opts.createEntries) {
-      createEntries(opts);
-    }
-
-    if (opts.readEntries) {
-      readEntries(opts);
-    }
-
-    if (opts.deletetable) {
-      opts.getConnector().tableOperations().delete(opts.getTableName());
-    }
-  }
-
-  private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    // Trace the write operation. Note: unless you flush the BatchWriter, you will not capture
-    // the write operation, as it occurs asynchronously. You can optionally create additional Spans
-    // within a given Trace, as seen below around the flush.
-    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
-
-    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
-    BatchWriter batchWriter = opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
-
-    Mutation m = new Mutation("row");
-    m.put("cf", "cq", "value");
-
-    batchWriter.addMutation(m);
-    // You can add timeline annotations to Spans, which can then be viewed in the Monitor
-    scope.getSpan().addTimelineAnnotation("Initiating Flush");
-    batchWriter.flush();
-
-    batchWriter.close();
-    scope.close();
-  }
-
-  private void readEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);
-
-    // Trace the read operation.
-    TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
-    System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));
-
-    int numberOfEntriesRead = 0;
-    for (Entry<Key,Value> entry : scanner) {
-      System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
-      ++numberOfEntriesRead;
-    }
-    // You can add additional metadata (key/value pairs) to Spans, which can then be viewed in the Monitor
-    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8), String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
-
-    readScope.close();
-  }
-
-  public static void main(String[] args) throws Exception {
-    try {
-      TracingExample tracingExample = new TracingExample();
-      Opts opts = new Opts();
-      ScannerOpts scannerOpts = new ScannerOpts();
-      opts.parseArgs(TracingExample.class.getName(), args, scannerOpts);
-
-      tracingExample.enableTracing(opts);
-      tracingExample.execute(opts);
-    } catch (Exception e) {
-      log.error("Caught exception running TraceExample", e);
-      System.exit(1);
-    }
-  }
-
-}

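The removed TracingExample boils down to wrapping client work in an HTrace span and closing the span once the work (including any BatchWriter flush) is done. A minimal sketch using the same htrace calls shown above; the span name and the batchWriter are placeholders:

    DistributedTrace.enable("myHost", "myApp");                  // register this client with the tracers
    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
    try {
      // ... perform the writes/reads to be traced ...
      scope.getSpan().addTimelineAnnotation("Initiating Flush");
      batchWriter.flush();                                       // flush inside the span so the write is captured
    } finally {
      scope.close();                                             // closing the scope ends the span
    }
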
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
deleted file mode 100644
index 7dad89c..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.combiner;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-
-/**
- * This combiner calculates the max, min, sum, and count of long integers represented as strings in values. It stores the result in a comma-separated value of
- * the form min,max,sum,count. If such a value is encountered while combining, its information is incorporated into the running calculations of min, max, sum,
- * and count. See {@link Combiner} for more information on which values are combined together. See docs/examples/README.combiner for instructions.
- */
-public class StatsCombiner extends Combiner {
-
-  public static final String RADIX_OPTION = "radix";
-
-  private int radix = 10;
-
-  @Override
-  public Value reduce(Key key, Iterator<Value> iter) {
-
-    long min = Long.MAX_VALUE;
-    long max = Long.MIN_VALUE;
-    long sum = 0;
-    long count = 0;
-
-    while (iter.hasNext()) {
-      String stats[] = iter.next().toString().split(",");
-
-      if (stats.length == 1) {
-        long val = Long.parseLong(stats[0], radix);
-        min = Math.min(val, min);
-        max = Math.max(val, max);
-        sum += val;
-        count += 1;
-      } else {
-        min = Math.min(Long.parseLong(stats[0], radix), min);
-        max = Math.max(Long.parseLong(stats[1], radix), max);
-        sum += Long.parseLong(stats[2], radix);
-        count += Long.parseLong(stats[3], radix);
-      }
-    }
-
-    String ret = Long.toString(min, radix) + "," + Long.toString(max, radix) + "," + Long.toString(sum, radix) + "," + Long.toString(count, radix);
-    return new Value(ret.getBytes());
-  }
-
-  @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    super.init(source, options, env);
-
-    if (options.containsKey(RADIX_OPTION))
-      radix = Integer.parseInt(options.get(RADIX_OPTION));
-    else
-      radix = 10;
-  }
-
-  @Override
-  public IteratorOptions describeOptions() {
-    IteratorOptions io = super.describeOptions();
-    io.setName("statsCombiner");
-    io.setDescription("Combiner that keeps track of min, max, sum, and count");
-    io.addNamedOption(RADIX_OPTION, "radix/base of the numbers");
-    return io;
-  }
-
-  @Override
-  public boolean validateOptions(Map<String,String> options) {
-    if (!super.validateOptions(options))
-      return false;
-
-    if (options.containsKey(RADIX_OPTION) && !options.get(RADIX_OPTION).matches("\\d+"))
-      throw new IllegalArgumentException("invalid option " + RADIX_OPTION + ":" + options.get(RADIX_OPTION));
-
-    return true;
-  }
-
-  /**
-   * A convenience method for setting the expected base/radix of the numbers
-   *
-   * @param iterConfig
-   *          Iterator settings to configure
-   * @param base
-   *          The expected base/radix of the numbers.
-   */
-  public static void setRadix(IteratorSetting iterConfig, int base) {
-    iterConfig.addOption(RADIX_OPTION, base + "");
-  }
-}

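To use the removed StatsCombiner, it had to be attached to a table as an iterator and told which columns to combine. A sketch under assumptions: the table name "stats_test" and column family "counts" are hypothetical; IteratorSetting, Combiner.setColumns, and attachIterator are standard client API calls, and setRadix comes from the class above.

    IteratorSetting setting = new IteratorSetting(10, "statsCombiner", StatsCombiner.class);
    Combiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("counts")));
    StatsCombiner.setRadix(setting, 10);                         // interpret values as base-10 longs
    connector.tableOperations().attachIterator("stats_test", setting);

After that, inserting the single value "7" and later "3" under the same key combines to a value of the form min,max,sum,count, i.e. 3,7,10,2.
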
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
deleted file mode 100644
index 14e3c8e..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * This class is an Accumulo constraint that ensures all fields of a key are alphanumeric.
- *
- * See docs/examples/README.constraint for instructions.
- *
- */
-
-public class AlphaNumKeyConstraint implements Constraint {
-
-  static final short NON_ALPHA_NUM_ROW = 1;
-  static final short NON_ALPHA_NUM_COLF = 2;
-  static final short NON_ALPHA_NUM_COLQ = 3;
-
-  static final String ROW_VIOLATION_MESSAGE = "Row was not alpha numeric";
-  static final String COLF_VIOLATION_MESSAGE = "Column family was not alpha numeric";
-  static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alpha numeric";
-
-  private boolean isAlphaNum(byte bytes[]) {
-    for (byte b : bytes) {
-      boolean ok = ((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9'));
-      if (!ok)
-        return false;
-    }
-
-    return true;
-  }
-
-  private Set<Short> addViolation(Set<Short> violations, short violation) {
-    if (violations == null) {
-      violations = new LinkedHashSet<>();
-      violations.add(violation);
-    } else if (!violations.contains(violation)) {
-      violations.add(violation);
-    }
-    return violations;
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    Set<Short> violations = null;
-
-    if (!isAlphaNum(mutation.getRow()))
-      violations = addViolation(violations, NON_ALPHA_NUM_ROW);
-
-    Collection<ColumnUpdate> updates = mutation.getUpdates();
-    for (ColumnUpdate columnUpdate : updates) {
-      if (!isAlphaNum(columnUpdate.getColumnFamily()))
-        violations = addViolation(violations, NON_ALPHA_NUM_COLF);
-
-      if (!isAlphaNum(columnUpdate.getColumnQualifier()))
-        violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
-    }
-
-    return null == violations ? null : new ArrayList<>(violations);
-  }
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-
-    switch (violationCode) {
-      case NON_ALPHA_NUM_ROW:
-        return ROW_VIOLATION_MESSAGE;
-      case NON_ALPHA_NUM_COLF:
-        return COLF_VIOLATION_MESSAGE;
-      case NON_ALPHA_NUM_COLQ:
-        return COLQ_VIOLATION_MESSAGE;
-    }
-
-    return null;
-  }
-
-}

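Constraints such as the AlphaNumKeyConstraint above take effect once they are registered on a table. A minimal sketch; the table name is a placeholder and addConstraint is the standard TableOperations call:

    connector.tableOperations().addConstraint("testConstraints", AlphaNumKeyConstraint.class.getName());

From then on, any mutation whose row, column family, or column qualifier contains a non-alphanumeric byte is rejected with the corresponding violation message.
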
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
deleted file mode 100644
index 3d94861..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * Ensure that mutations are a reasonable size: we must be able to fit several in memory at a time.
- *
- */
-public class MaxMutationSize implements Constraint {
-  static final long MAX_SIZE = Runtime.getRuntime().maxMemory() >> 8;
-  static final List<Short> empty = Collections.emptyList();
-  static final List<Short> violations = Collections.singletonList(Short.valueOf((short) 0));
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-    return String.format("mutation exceeded maximum size of %d", MAX_SIZE);
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    if (mutation.estimatedMemoryUsed() < MAX_SIZE)
-      return empty;
-    return violations;
-  }
-}

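MaxMutationSize rejects any mutation whose estimated footprint exceeds 1/256th of the JVM's maximum heap (maxMemory() >> 8). A small sketch of the check the constraint performs; the row and column data are placeholders:

    long limit = Runtime.getRuntime().maxMemory() >> 8;         // same bound the constraint computes
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    boolean accepted = m.estimatedMemoryUsed() < limit;          // the comparison made in check()
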
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
deleted file mode 100644
index 2b22e6b..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * This class is an Accumulo constraint that ensures values are numeric strings. See docs/examples/README.constraint for instructions.
- */
-public class NumericValueConstraint implements Constraint {
-
-  static final short NON_NUMERIC_VALUE = 1;
-  static final String VIOLATION_MESSAGE = "Value is not numeric";
-
-  private static final List<Short> VIOLATION_LIST = Collections.unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
-
-  private boolean isNumeric(byte bytes[]) {
-    for (byte b : bytes) {
-      boolean ok = (b >= '0' && b <= '9');
-      if (!ok)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    Collection<ColumnUpdate> updates = mutation.getUpdates();
-
-    for (ColumnUpdate columnUpdate : updates) {
-      if (!isNumeric(columnUpdate.getValue()))
-        return VIOLATION_LIST;
-    }
-
-    return null;
-  }
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-
-    switch (violationCode) {
-      case NON_NUMERIC_VALUE:
-        return "Value is not numeric";
-    }
-
-    return null;
-  }
-
-}

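For NumericValueConstraint, only the value bytes matter; keys are not checked. A quick illustration of what passes and what trips the NON_NUMERIC_VALUE violation (row and column names are placeholders):

    Mutation ok = new Mutation("row1");
    ok.put("data", "count", "12345");      // accepted: every value byte is '0'..'9'
    Mutation bad = new Mutation("row1");
    bad.put("data", "count", "12345a");    // rejected: 'a' falls outside '0'..'9'
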
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
deleted file mode 100644
index 111fae0..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Computes recursive counts over file system information and stores them back into the same Accumulo table. See docs/examples/README.dirlist for instructions.
- */
-public class FileCount {
-
-  private int entriesScanned;
-  private int inserts;
-
-  private Opts opts;
-  private ScannerOpts scanOpts;
-  private BatchWriterOpts bwOpts;
-
-  private static class CountValue {
-    int dirCount = 0;
-    int fileCount = 0;
-    int recursiveDirCount = 0;
-    int recursiveFileCount = 0;
-
-    void set(Value val) {
-      String sa[] = val.toString().split(",");
-      dirCount = Integer.parseInt(sa[0]);
-      fileCount = Integer.parseInt(sa[1]);
-      recursiveDirCount = Integer.parseInt(sa[2]);
-      recursiveFileCount = Integer.parseInt(sa[3]);
-    }
-
-    Value toValue() {
-      return new Value((dirCount + "," + fileCount + "," + recursiveDirCount + "," + recursiveFileCount).getBytes());
-    }
-
-    void incrementFiles() {
-      fileCount++;
-      recursiveFileCount++;
-    }
-
-    void incrementDirs() {
-      dirCount++;
-      recursiveDirCount++;
-    }
-
-    public void clear() {
-      dirCount = 0;
-      fileCount = 0;
-      recursiveDirCount = 0;
-      recursiveFileCount = 0;
-    }
-
-    public void incrementRecursive(CountValue other) {
-      recursiveDirCount += other.recursiveDirCount;
-      recursiveFileCount += other.recursiveFileCount;
-    }
-  }
-
-  private int findMaxDepth(Scanner scanner, int min, int max) {
-    int mid = min + (max - min) / 2;
-    return findMaxDepth(scanner, min, mid, max);
-  }
-
-  private int findMaxDepth(Scanner scanner, int min, int mid, int max) {
-    // check to see if the mid point exists
-    if (max < min)
-      return -1;
-
-    scanner.setRange(new Range(String.format("%03d", mid), true, String.format("%03d", mid + 1), false));
-
-    if (scanner.iterator().hasNext()) {
-      // this depth exists, check to see if a larger depth exists
-      int ret = findMaxDepth(scanner, mid + 1, max);
-      if (ret == -1)
-        return mid; // this must be the max
-      else
-        return ret;
-    } else {
-      // this depth does not exist, look lower
-      return findMaxDepth(scanner, min, mid - 1);
-    }
-
-  }
-
-  private int findMaxDepth(Scanner scanner) {
-    // do binary search to find max depth
-    int origBatchSize = scanner.getBatchSize();
-    scanner.setBatchSize(100);
-    int depth = findMaxDepth(scanner, 0, 64, 999);
-    scanner.setBatchSize(origBatchSize);
-    return depth;
-  }
-
-  // find the count column and consume a row
-  private Entry<Key,Value> findCount(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator, CountValue cv) {
-
-    Key key = entry.getKey();
-    Text currentRow = key.getRow();
-
-    if (key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0)
-      cv.set(entry.getValue());
-
-    while (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-      key = entry.getKey();
-
-      if (key.compareRow(currentRow) != 0)
-        return entry;
-
-      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0 && key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0) {
-        cv.set(entry.getValue());
-      }
-
-    }
-
-    return null;
-  }
-
-  private Entry<Key,Value> consumeRow(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator) {
-    Key key = entry.getKey();
-    Text currentRow = key.getRow();
-
-    while (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-      key = entry.getKey();
-
-      if (key.compareRow(currentRow) != 0)
-        return entry;
-    }
-
-    return null;
-  }
-
-  private String extractDir(Key key) {
-    String row = key.getRowData().toString();
-    return row.substring(3, row.lastIndexOf('/'));
-  }
-
-  private Mutation createMutation(int depth, String dir, CountValue countVal) {
-    Mutation m = new Mutation(String.format("%03d%s", depth, dir));
-    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, countVal.toValue());
-    return m;
-  }
-
-  private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter) throws Exception {
-
-    scanner.setRange(new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false));
-
-    CountValue countVal = new CountValue();
-
-    Iterator<Entry<Key,Value>> iterator = scanner.iterator();
-
-    String currentDir = null;
-
-    Entry<Key,Value> entry = null;
-    if (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-    }
-
-    while (entry != null) {
-      Key key = entry.getKey();
-
-      String dir = extractDir(key);
-
-      if (currentDir == null) {
-        currentDir = dir;
-      } else if (!currentDir.equals(dir)) {
-        batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
-        inserts++;
-        currentDir = dir;
-        countVal.clear();
-      }
-
-      // process a whole row
-      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0) {
-        CountValue tmpCount = new CountValue();
-        entry = findCount(entry, iterator, tmpCount);
-
-        if (tmpCount.dirCount == 0 && tmpCount.fileCount == 0) {
-          // in this case the higher depth will not insert anything if the
-          // dir has no children, so insert something here
-          Mutation m = new Mutation(key.getRow());
-          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, tmpCount.toValue());
-          batchWriter.addMutation(m);
-          inserts++;
-        }
-
-        countVal.incrementRecursive(tmpCount);
-        countVal.incrementDirs();
-      } else {
-        entry = consumeRow(entry, iterator);
-        countVal.incrementFiles();
-      }
-    }
-
-    if (currentDir != null) {
-      batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
-      inserts++;
-    }
-  }
-
-  public FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
-    this.opts = opts;
-    this.scanOpts = scanOpts;
-    this.bwOpts = bwOpts;
-  }
-
-  public void run() throws Exception {
-
-    entriesScanned = 0;
-    inserts = 0;
-
-    Connector conn = opts.getConnector();
-    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
-    scanner.setBatchSize(scanOpts.scanBatchSize);
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    long t1 = System.currentTimeMillis();
-
-    int depth = findMaxDepth(scanner);
-
-    long t2 = System.currentTimeMillis();
-
-    for (int d = depth; d > 0; d--) {
-      calculateCounts(scanner, d, bw);
-      // must flush so next depth can read what prev depth wrote
-      bw.flush();
-    }
-
-    bw.close();
-
-    long t3 = System.currentTimeMillis();
-
-    System.out.printf("Max depth              : %d%n", depth);
-    System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
-    System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
-    System.out.printf("Entries scanned        : %,d %n", entriesScanned);
-    System.out.printf("Counts inserted        : %,d %n", inserts);
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    String programName = FileCount.class.getName();
-    opts.parseArgs(programName, args, scanOpts, bwOpts);
-
-    FileCount fileCount = new FileCount(opts, scanOpts, bwOpts);
-    fileCount.run();
-  }
-}

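FileCount relies on the dirlist row layout, where every row is prefixed with a zero-padded depth so a whole depth can be scanned with one range, and the per-directory counts live in a single dir column. A small sketch of that convention; the path is a placeholder:

    int depth = 2;
    String dir = "/local/user1";
    String row = String.format("%03d%s", depth, dir);            // "002/local/user1"
    Range wholeDepth = new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false);

The stored count value itself is the comma-separated dirCount,fileCount,recursiveDirCount,recursiveFileCount string built by CountValue.toValue().
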
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
deleted file mode 100644
index c0808fe..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.LongCombiner;
-import org.apache.accumulo.core.iterators.TypedValueCombiner.Encoder;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.examples.simple.filedata.ChunkCombiner;
-import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a
- * separate table, and ingests the file data into a third table. See docs/examples/README.dirlist for instructions.
- */
-public class Ingest {
-  static final Value nullValue = new Value(new byte[0]);
-  public static final String LENGTH_CQ = "length";
-  public static final String HIDDEN_CQ = "hidden";
-  public static final String EXEC_CQ = "exec";
-  public static final String LASTMOD_CQ = "lastmod";
-  public static final String HASH_CQ = "md5";
-  public static final Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
-
-  public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir, boolean isHidden, boolean canExec, long length, long lastmod,
-      String hash) {
-    if (path.equals("/"))
-      path = "";
-    Mutation m = new Mutation(QueryUtil.getRow(path));
-    Text colf = null;
-    if (isDir)
-      colf = QueryUtil.DIR_COLF;
-    else
-      colf = new Text(encoder.encode(Long.MAX_VALUE - lastmod));
-    m.put(colf, new Text(LENGTH_CQ), cv, new Value(Long.toString(length).getBytes()));
-    m.put(colf, new Text(HIDDEN_CQ), cv, new Value(Boolean.toString(isHidden).getBytes()));
-    m.put(colf, new Text(EXEC_CQ), cv, new Value(Boolean.toString(canExec).getBytes()));
-    m.put(colf, new Text(LASTMOD_CQ), cv, new Value(Long.toString(lastmod).getBytes()));
-    if (hash != null && hash.length() > 0)
-      m.put(colf, new Text(HASH_CQ), cv, new Value(hash.getBytes()));
-    return m;
-  }
-
-  private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
-    // build main table entry
-    String path = null;
-    try {
-      path = src.getCanonicalPath();
-    } catch (IOException e) {
-      path = src.getAbsolutePath();
-    }
-    System.out.println(path);
-
-    String hash = null;
-    if (!src.isDirectory()) {
-      try {
-        hash = fdi.insertFileData(path, data);
-      } catch (Exception e) {
-        // if something goes wrong, just skip this one
-        return;
-      }
-    }
-
-    dirBW.addMutation(buildMutation(cv, path, src.isDirectory(), src.isHidden(), src.canExecute(), src.length(), src.lastModified(), hash));
-
-    // build index table entries
-    Text row = QueryUtil.getForwardIndex(path);
-    if (row != null) {
-      Text p = new Text(QueryUtil.getRow(path));
-      Mutation m = new Mutation(row);
-      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
-      indexBW.addMutation(m);
-
-      row = QueryUtil.getReverseIndex(path);
-      m = new Mutation(row);
-      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
-      indexBW.addMutation(m);
-    }
-  }
-
-  private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
-    // ingest this File
-    ingest(src, cv, dirBW, indexBW, fdi, data);
-    // recurse into subdirectories
-    if (src.isDirectory()) {
-      File[] files = src.listFiles();
-      if (files == null)
-        return;
-      for (File child : files) {
-        recurse(child, cv, dirBW, indexBW, fdi, data);
-      }
-    }
-  }
-
-  static class Opts extends ClientOpts {
-    @Parameter(names = "--dirTable", description = "a table to hold the directory information")
-    String nameTable = "dirTable";
-    @Parameter(names = "--indexTable", description = "an index over the ingested data")
-    String indexTable = "indexTable";
-    @Parameter(names = "--dataTable", description = "the file data, chunked into parts")
-    String dataTable = "dataTable";
-    @Parameter(names = "--vis", description = "the visibility to mark the data", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility();
-    @Parameter(names = "--chunkSize", description = "the size of chunks when breaking down files")
-    int chunkSize = 100000;
-    @Parameter(description = "<dir> { <dir> ... }")
-    List<String> directories = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
-
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.nameTable))
-      conn.tableOperations().create(opts.nameTable);
-    if (!conn.tableOperations().exists(opts.indexTable))
-      conn.tableOperations().create(opts.indexTable);
-    if (!conn.tableOperations().exists(opts.dataTable)) {
-      conn.tableOperations().create(opts.dataTable);
-      conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
-    }
-
-    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
-    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
-    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
-    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
-    for (String dir : opts.directories) {
-      recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
-
-      // fill in parent directory info
-      int slashIndex = -1;
-      while ((slashIndex = dir.lastIndexOf("/")) > 0) {
-        dir = dir.substring(0, slashIndex);
-        ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
-      }
-    }
-    ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
-
-    dirBW.close();
-    indexBW.close();
-    dataBW.close();
-  }
-}

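The schema written by Ingest can be read off buildMutation above: directory rows use the dir column family, file rows use a column family holding the inverted last-modified time, and both carry length/hidden/exec/lastmod (and optionally md5) qualifiers. A sketch of producing one directory entry with it; the path, visibility, and BatchWriter are placeholders:

    ColumnVisibility cv = new ColumnVisibility();                // empty visibility, i.e. readable by anyone
    Mutation m = Ingest.buildMutation(cv, "/local/user1", true, false, true, 0, System.currentTimeMillis(), null);
    dirBW.addMutation(m);                                        // dirBW is a BatchWriter on the directory table
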

[3/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
deleted file mode 100644
index fab2532..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RowHash.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-import java.util.Base64;
-import java.util.Collections;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-public class RowHash extends Configured implements Tool {
-  /**
-   * The Mapper class that, for each input key/value pair, writes back a mutation containing an MD5 hash of the value.
-   */
-  public static class HashDataMapper extends Mapper<Key,Value,Text,Mutation> {
-    @Override
-    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
-      Mutation m = new Mutation(row.getRow());
-      m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"), new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
-      context.write(null, m);
-      context.progress();
-    }
-
-    @Override
-    public void setup(Context job) {}
-  }
-
-  private static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--column", required = true)
-    String column;
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Job job = Job.getInstance(getConf());
-    job.setJobName(this.getClass().getName());
-    job.setJarByClass(this.getClass());
-    Opts opts = new Opts();
-    opts.parseArgs(RowHash.class.getName(), args);
-    job.setInputFormatClass(AccumuloInputFormat.class);
-    opts.setAccumuloConfigs(job);
-
-    String col = opts.column;
-    int idx = col.indexOf(":");
-    Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
-    Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
-    if (cf.getLength() > 0)
-      AccumuloInputFormat.fetchColumns(job, Collections.singleton(new Pair<>(cf, cq)));
-
-    job.setMapperClass(HashDataMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Mutation.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-
-  public static void main(String[] args) throws Exception {
-    ToolRunner.run(new Configuration(), new RowHash(), args);
-  }
-}

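RowHash is a standard Hadoop Tool, so it was normally launched through ToolRunner with the connection options from MapReduceClientOnRequiredTable plus its own --column flag. A hedged sketch: the short connection flags (-u, -p, -i, -z, -t) are the usual client options and, like the values, are assumptions here.

    String[] args = {"-u", "root", "-p", "secret", "-i", "instance", "-z", "zk1:2181",
        "-t", "input_table", "--column", "cf:cq"};
    int rc = ToolRunner.run(new Configuration(), new RowHash(), args);
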
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
deleted file mode 100644
index 96603ad..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TableToFile.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.HashSet;
-import java.util.Map;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.format.DefaultFormatter;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Takes a table and outputs the specified columns to a set of part files on HDFS
- * {@code accumulo accumulo.examples.mapreduce.TableToFile <username> <password> <tablename> <column> <hdfs-output-path>}
- */
-public class TableToFile extends Configured implements Tool {
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--output", description = "output directory", required = true)
-    String output;
-    @Parameter(names = "--columns", description = "columns to extract, in cf:cq{,cf:cq,...} form")
-    String columns = "";
-  }
-
-  /**
-   * The Mapper class that, for each input key/value pair, writes a formatted line of output.
-   */
-  public static class TTFMapper extends Mapper<Key,Value,NullWritable,Text> {
-    @Override
-    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
-      Map.Entry<Key,Value> entry = new SimpleImmutableEntry<>(row, data);
-      context.write(NullWritable.get(), new Text(DefaultFormatter.formatEntry(entry, false)));
-      context.setStatus("Outputed Value");
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, AccumuloSecurityException {
-    Job job = Job.getInstance(getConf());
-    job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
-    job.setJarByClass(this.getClass());
-    Opts opts = new Opts();
-    opts.parseArgs(getClass().getName(), args);
-
-    job.setInputFormatClass(AccumuloInputFormat.class);
-    opts.setAccumuloConfigs(job);
-
-    HashSet<Pair<Text,Text>> columnsToFetch = new HashSet<>();
-    for (String col : opts.columns.split(",")) {
-      int idx = col.indexOf(":");
-      Text cf = new Text(idx < 0 ? col : col.substring(0, idx));
-      Text cq = idx < 0 ? null : new Text(col.substring(idx + 1));
-      if (cf.getLength() > 0)
-        columnsToFetch.add(new Pair<>(cf, cq));
-    }
-    if (!columnsToFetch.isEmpty())
-      AccumuloInputFormat.fetchColumns(job, columnsToFetch);
-
-    job.setMapperClass(TTFMapper.class);
-    job.setMapOutputKeyClass(NullWritable.class);
-    job.setMapOutputValueClass(Text.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(opts.output));
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-
-  /**
-   *
-   * @param args
-   *          instanceName zookeepers username password table columns outputpath
-   */
-  public static void main(String[] args) throws Exception {
-    ToolRunner.run(new Configuration(), new TableToFile(), args);
-  }
-}

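TableToFile follows the same ToolRunner pattern; --columns and --output come from its Opts class above, while the connection flags are again the usual client options and are assumed here, along with the values:

    String[] args = {"-u", "root", "-p", "secret", "-i", "instance", "-z", "zk1:2181",
        "-t", "input_table", "--columns", "cf:cq", "--output", "/tmp/table_dump"};
    int rc = ToolRunner.run(new Configuration(), new TableToFile(), args);
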
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
deleted file mode 100644
index b0b5177..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TeraSortIngest.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapreduce.InputFormat;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Generate the *almost* official terasort input data set. (See below) The user specifies the number of rows and the output directory and this class runs a
- * map/reduce program to generate the data. The format of the data is:
- * <ul>
- * <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
- * <li>The keys are random characters from the set ' ' .. '~'.
- * <li>The rowid is the right-justified row id as an int.
- * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
- * </ul>
- *
- * This TeraSort is slightly modified to allow for variable-length keys and values. The row length isn't variable. To generate a terabyte of data the same way
- * TeraSort does, use 10000000000 rows, a 10/10 byte key length, and a 78/78 byte value length. Along with the 10 byte row id and \r\n this gives you
- * 100 bytes per row * 10000000000 rows = 1 TB. The min/max ranges for the key and value length parameters are both inclusive.
- *
- *
- */
-public class TeraSortIngest extends Configured implements Tool {
-  /**
-   * An input format that assigns ranges of longs to each mapper.
-   */
-  static class RangeInputFormat extends InputFormat<LongWritable,NullWritable> {
-    /**
-     * An input split consisting of a range on numbers.
-     */
-    static class RangeInputSplit extends InputSplit implements Writable {
-      long firstRow;
-      long rowCount;
-
-      public RangeInputSplit() {}
-
-      public RangeInputSplit(long offset, long length) {
-        firstRow = offset;
-        rowCount = length;
-      }
-
-      @Override
-      public long getLength() throws IOException {
-        return 0;
-      }
-
-      @Override
-      public String[] getLocations() throws IOException {
-        return new String[] {};
-      }
-
-      @Override
-      public void readFields(DataInput in) throws IOException {
-        firstRow = WritableUtils.readVLong(in);
-        rowCount = WritableUtils.readVLong(in);
-      }
-
-      @Override
-      public void write(DataOutput out) throws IOException {
-        WritableUtils.writeVLong(out, firstRow);
-        WritableUtils.writeVLong(out, rowCount);
-      }
-    }
-
-    /**
-     * A record reader that will generate a range of numbers.
-     */
-    static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
-      long startRow;
-      long finishedRows;
-      long totalRows;
-
-      public RangeRecordReader(RangeInputSplit split) {
-        startRow = split.firstRow;
-        finishedRows = 0;
-        totalRows = split.rowCount;
-      }
-
-      @Override
-      public void close() throws IOException {}
-
-      @Override
-      public float getProgress() throws IOException {
-        return finishedRows / (float) totalRows;
-      }
-
-      @Override
-      public LongWritable getCurrentKey() throws IOException, InterruptedException {
-        return new LongWritable(startRow + finishedRows);
-      }
-
-      @Override
-      public NullWritable getCurrentValue() throws IOException, InterruptedException {
-        return NullWritable.get();
-      }
-
-      @Override
-      public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
-
-      @Override
-      public boolean nextKeyValue() throws IOException, InterruptedException {
-        if (finishedRows < totalRows) {
-          ++finishedRows;
-          return true;
-        }
-        return false;
-      }
-    }
-
-    @Override
-    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
-      // reporter.setStatus("Creating record reader");
-      return new RangeRecordReader((RangeInputSplit) split);
-    }
-
-    /**
-     * Create the desired number of splits, dividing the number of rows between the mappers.
-     */
-    @Override
-    public List<InputSplit> getSplits(JobContext job) {
-      long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
-      int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
-      long rowsPerSplit = totalRows / numSplits;
-      System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
-      ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
-      long currentRow = 0;
-      for (int split = 0; split < numSplits - 1; ++split) {
-        splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
-        currentRow += rowsPerSplit;
-      }
-      splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
-      System.out.println("Done Generating.");
-      return splits;
-    }
-
-  }
-
-  private static String NUMSPLITS = "terasort.overridesplits";
-  private static String NUMROWS = "terasort.numrows";
-
-  static class RandomGenerator {
-    private long seed = 0;
-    private static final long mask32 = (1l << 32) - 1;
-    /**
-     * The number of iterations separating the precomputed seeds.
-     */
-    private static final int seedSkip = 128 * 1024 * 1024;
-    /**
-     * The precomputed seed values after every seedSkip iterations. There should be enough values so that 2**32 iterations are covered.
-     */
-    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
-        3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
-        1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
-
-    /**
-     * Start the random number generator on the given iteration.
-     *
-     * @param initialIteration
-     *          the iteration number to start on
-     */
-    RandomGenerator(long initialIteration) {
-      int baseIndex = (int) ((initialIteration & mask32) / seedSkip);
-      seed = seeds[baseIndex];
-      for (int i = 0; i < initialIteration % seedSkip; ++i) {
-        next();
-      }
-    }
-
-    RandomGenerator() {
-      this(0);
-    }
-
-    long next() {
-      seed = (seed * 3141592621l + 663896637) & mask32;
-      return seed;
-    }
-  }
-
-  /**
-   * The Mapper class that, given a row number, will generate the appropriate output line.
-   */
-  public static class SortGenMapper extends Mapper<LongWritable,NullWritable,Text,Mutation> {
-    private Text tableName = null;
-    private int minkeylength = 0;
-    private int maxkeylength = 0;
-    private int minvaluelength = 0;
-    private int maxvaluelength = 0;
-
-    private Text key = new Text();
-    private Text value = new Text();
-    private RandomGenerator rand;
-    private byte[] keyBytes; // = new byte[12];
-    private byte[] spaces = "          ".getBytes();
-    private byte[][] filler = new byte[26][];
-    {
-      for (int i = 0; i < 26; ++i) {
-        filler[i] = new byte[10];
-        for (int j = 0; j < 10; ++j) {
-          filler[i][j] = (byte) ('A' + i);
-        }
-      }
-    }
-
-    /**
-     * Add a random key to the text
-     */
-    private Random random = new Random();
-
-    private void addKey() {
-      int range = random.nextInt(maxkeylength - minkeylength + 1);
-      int keylen = range + minkeylength;
-      int keyceil = keylen + (4 - (keylen % 4));
-      keyBytes = new byte[keyceil];
-
-      long temp = 0;
-      for (int i = 0; i < keyceil / 4; i++) {
-        temp = rand.next() / 52;
-        keyBytes[3 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[2 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[1 + 4 * i] = (byte) (' ' + (temp % 95));
-        temp /= 95;
-        keyBytes[4 * i] = (byte) (' ' + (temp % 95));
-      }
-      key.set(keyBytes, 0, keylen);
-    }
-
-    /**
-     * Add the rowid to the row.
-     */
-    private Text getRowIdString(long rowId) {
-      Text paddedRowIdString = new Text();
-      byte[] rowid = Integer.toString((int) rowId).getBytes();
-      int padSpace = 10 - rowid.length;
-      if (padSpace > 0) {
-        paddedRowIdString.append(spaces, 0, 10 - rowid.length);
-      }
-      paddedRowIdString.append(rowid, 0, Math.min(rowid.length, 10));
-      return paddedRowIdString;
-    }
-
-    /**
-     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
-     *
-     * @param rowId
-     *          the current row number
-     */
-    private void addFiller(long rowId) {
-      int base = (int) ((rowId * 8) % 26);
-
-      // Get Random var
-      Random random = new Random(rand.seed);
-
-      int range = random.nextInt(maxvaluelength - minvaluelength + 1);
-      int valuelen = range + minvaluelength;
-
-      while (valuelen > 10) {
-        value.append(filler[(base + valuelen) % 26], 0, 10);
-        valuelen -= 10;
-      }
-
-      if (valuelen > 0)
-        value.append(filler[(base + valuelen) % 26], 0, valuelen);
-    }
-
-    @Override
-    public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
-      context.setStatus("Entering");
-      long rowId = row.get();
-      if (rand == null) {
-        // we use 3 random numbers per row
-        rand = new RandomGenerator(rowId * 3);
-      }
-      addKey();
-      value.clear();
-      // addRowId(rowId);
-      addFiller(rowId);
-
-      // New
-      Mutation m = new Mutation(key);
-      m.put(new Text("c"), // column family
-          getRowIdString(rowId), // column qual
-          new Value(value.toString().getBytes())); // data
-
-      context.setStatus("About to add to accumulo");
-      context.write(tableName, m);
-      context.setStatus("Added to accumulo " + key.toString());
-    }
-
-    @Override
-    public void setup(Context job) {
-      minkeylength = job.getConfiguration().getInt("cloudgen.minkeylength", 0);
-      maxkeylength = job.getConfiguration().getInt("cloudgen.maxkeylength", 0);
-      minvaluelength = job.getConfiguration().getInt("cloudgen.minvaluelength", 0);
-      maxvaluelength = job.getConfiguration().getInt("cloudgen.maxvaluelength", 0);
-      tableName = new Text(job.getConfiguration().get("cloudgen.tablename"));
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    ToolRunner.run(new Configuration(), new TeraSortIngest(), args);
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--count", description = "number of rows to ingest", required = true)
-    long numRows;
-    @Parameter(names = {"-nk", "--minKeySize"}, description = "miniumum key size", required = true)
-    int minKeyLength;
-    @Parameter(names = {"-xk", "--maxKeySize"}, description = "maximum key size", required = true)
-    int maxKeyLength;
-    @Parameter(names = {"-nv", "--minValueSize"}, description = "minimum key size", required = true)
-    int minValueLength;
-    @Parameter(names = {"-xv", "--maxValueSize"}, description = "maximum key size", required = true)
-    int maxValueLength;
-    @Parameter(names = "--splits", description = "number of splits to create in the table")
-    int splits = 0;
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Job job = Job.getInstance(getConf());
-    job.setJobName("TeraSortCloud");
-    job.setJarByClass(this.getClass());
-    Opts opts = new Opts();
-    opts.parseArgs(TeraSortIngest.class.getName(), args);
-
-    job.setInputFormatClass(RangeInputFormat.class);
-    job.setMapperClass(SortGenMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Mutation.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    opts.setAccumuloConfigs(job);
-    BatchWriterConfig bwConfig = new BatchWriterConfig().setMaxMemory(10L * 1000 * 1000);
-    AccumuloOutputFormat.setBatchWriterOptions(job, bwConfig);
-
-    Configuration conf = job.getConfiguration();
-    conf.setLong(NUMROWS, opts.numRows);
-    conf.setInt("cloudgen.minkeylength", opts.minKeyLength);
-    conf.setInt("cloudgen.maxkeylength", opts.maxKeyLength);
-    conf.setInt("cloudgen.minvaluelength", opts.minValueLength);
-    conf.setInt("cloudgen.maxvaluelength", opts.maxValueLength);
-    conf.set("cloudgen.tablename", opts.getTableName());
-
-    if (args.length > 10)
-      conf.setInt(NUMSPLITS, opts.splits);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-}

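For reference, the removed TeraSortIngest drives its key generation with a 32-bit linear congruential generator whose state is re-seeded from a precomputed table every 128M iterations. The standalone sketch below reproduces only that recurrence; the class name and the demo main method are illustrative additions, not part of the removed file.

    // Minimal sketch of the 32-bit LCG used by the removed RandomGenerator.
    public class Lcg32Sketch {
      private static final long MASK32 = (1L << 32) - 1;
      private long seed;

      Lcg32Sketch(long initialSeed) {
        this.seed = initialSeed & MASK32;
      }

      long next() {
        // Same recurrence as the removed code: seed = (seed * 3141592621 + 663896637) mod 2^32
        seed = (seed * 3141592621L + 663896637L) & MASK32;
        return seed;
      }

      public static void main(String[] args) {
        Lcg32Sketch rng = new Lcg32Sketch(0L);
        for (int i = 0; i < 3; i++) {
          System.out.println(rng.next());
        }
      }
    }
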
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
deleted file mode 100644
index 74c40a5..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/TokenFileWordCount.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this. As an example of using
- * AccumuloInputFormat and AccumuloOutputFormat directly, this version does not use the ClientOpts class to parse arguments. See README.mapred for more details.
- *
- */
-public class TokenFileWordCount extends Configured implements Tool {
-
-  private static final Logger log = LoggerFactory.getLogger(TokenFileWordCount.class);
-
-  public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
-    @Override
-    public void map(LongWritable key, Text value, Context output) throws IOException {
-      String[] words = value.toString().split("\\s+");
-
-      for (String word : words) {
-
-        Mutation mutation = new Mutation(new Text(word));
-        mutation.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
-
-        try {
-          output.write(null, mutation);
-        } catch (InterruptedException e) {
-          log.error("Could not write to Context.", e);
-        }
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-
-    String instance = args[0];
-    String zookeepers = args[1];
-    String user = args[2];
-    String tokenFile = args[3];
-    String input = args[4];
-    String tableName = args[5];
-
-    Job job = Job.getInstance(getConf());
-    job.setJobName(TokenFileWordCount.class.getName());
-    job.setJarByClass(this.getClass());
-
-    job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, input);
-
-    job.setMapperClass(MapClass.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Mutation.class);
-
-    // AccumuloInputFormat not used here, but it uses the same functions.
-    AccumuloOutputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
-    AccumuloOutputFormat.setConnectorInfo(job, user, tokenFile);
-    AccumuloOutputFormat.setCreateTables(job, true);
-    AccumuloOutputFormat.setDefaultTableName(job, tableName);
-
-    job.waitForCompletion(true);
-    return 0;
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new TokenFileWordCount(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
deleted file mode 100644
index 9cdf8d0..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/UniqueColumns.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A simple map reduce job that computes the unique column families and column qualifiers in a table. This example shows one way to run against an offline
- * table.
- */
-public class UniqueColumns extends Configured implements Tool {
-
-  private static final Text EMPTY = new Text();
-
-  public static class UMapper extends Mapper<Key,Value,Text,Text> {
-    private Text temp = new Text();
-    private static final Text CF = new Text("cf:");
-    private static final Text CQ = new Text("cq:");
-
-    @Override
-    public void map(Key key, Value value, Context context) throws IOException, InterruptedException {
-      temp.set(CF);
-      ByteSequence cf = key.getColumnFamilyData();
-      temp.append(cf.getBackingArray(), cf.offset(), cf.length());
-      context.write(temp, EMPTY);
-
-      temp.set(CQ);
-      ByteSequence cq = key.getColumnQualifierData();
-      temp.append(cq.getBackingArray(), cq.offset(), cq.length());
-      context.write(temp, EMPTY);
-    }
-  }
-
-  public static class UReducer extends Reducer<Text,Text,Text,Text> {
-    @Override
-    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
-      context.write(key, EMPTY);
-    }
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--output", description = "output directory")
-    String output;
-    @Parameter(names = "--reducers", description = "number of reducers to use", required = true)
-    int reducers;
-    @Parameter(names = "--offline", description = "run against an offline table")
-    boolean offline = false;
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(UniqueColumns.class.getName(), args);
-
-    String jobName = this.getClass().getSimpleName() + "_" + System.currentTimeMillis();
-
-    Job job = Job.getInstance(getConf());
-    job.setJobName(jobName);
-    job.setJarByClass(this.getClass());
-
-    String clone = opts.getTableName();
-    Connector conn = null;
-
-    opts.setAccumuloConfigs(job);
-
-    if (opts.offline) {
-      /*
-       * this example clones the table and takes it offline. If you plan to run map reduce jobs over a table many times, it may be more efficient to compact the
-       * table, clone it, and then keep using the same clone as input for map reduce.
-       */
-
-      conn = opts.getConnector();
-      clone = opts.getTableName() + "_" + jobName;
-      conn.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(), new HashSet<String>());
-      conn.tableOperations().offline(clone);
-
-      AccumuloInputFormat.setOfflineTableScan(job, true);
-      AccumuloInputFormat.setInputTableName(job, clone);
-    }
-
-    job.setInputFormatClass(AccumuloInputFormat.class);
-
-    job.setMapperClass(UMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Text.class);
-
-    job.setCombinerClass(UReducer.class);
-    job.setReducerClass(UReducer.class);
-
-    job.setNumReduceTasks(opts.reducers);
-
-    job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(opts.output));
-
-    job.waitForCompletion(true);
-
-    if (opts.offline) {
-      conn.tableOperations().delete(clone);
-    }
-
-    return job.isSuccessful() ? 0 : 1;
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new UniqueColumns(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
deleted file mode 100644
index 604d05d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/WordCount.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this.
- *
- */
-public class WordCount extends Configured implements Tool {
-
-  private static final Logger log = LoggerFactory.getLogger(WordCount.class);
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--input", description = "input directory")
-    String inputDirectory;
-  }
-
-  public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
-    @Override
-    public void map(LongWritable key, Text value, Context output) throws IOException {
-      String[] words = value.toString().split("\\s+");
-
-      for (String word : words) {
-
-        Mutation mutation = new Mutation(new Text(word));
-        mutation.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
-
-        try {
-          output.write(null, mutation);
-        } catch (InterruptedException e) {
-          log.error("Could not write mutation to Context.", e);
-        }
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(WordCount.class.getName(), args);
-
-    Job job = Job.getInstance(getConf());
-    job.setJobName(WordCount.class.getName());
-    job.setJarByClass(this.getClass());
-
-    job.setInputFormatClass(TextInputFormat.class);
-    TextInputFormat.setInputPaths(job, new Path(opts.inputDirectory));
-
-    job.setMapperClass(MapClass.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Mutation.class);
-    opts.setAccumuloConfigs(job);
-    job.waitForCompletion(true);
-    return 0;
-  }
-
-  public static void main(String[] args) throws Exception {
-    ToolRunner.run(new Configuration(), new WordCount(), args);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
deleted file mode 100644
index 42ec5ea..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/BulkIngestExample.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce.bulk;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Base64;
-import java.util.Collection;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.mapreduce.AccumuloFileOutputFormat;
-import org.apache.accumulo.core.client.mapreduce.lib.partition.RangePartitioner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Example map reduce job that bulk ingests data into an accumulo table. The expected input is text files containing tab-separated key value pairs on each line.
- */
-public class BulkIngestExample extends Configured implements Tool {
-  public static class MapClass extends Mapper<LongWritable,Text,Text,Text> {
-    private Text outputKey = new Text();
-    private Text outputValue = new Text();
-
-    @Override
-    public void map(LongWritable key, Text value, Context output) throws IOException, InterruptedException {
-      // split on tab
-      int index = -1;
-      for (int i = 0; i < value.getLength(); i++) {
-        if (value.getBytes()[i] == '\t') {
-          index = i;
-          break;
-        }
-      }
-
-      if (index > 0) {
-        outputKey.set(value.getBytes(), 0, index);
-        outputValue.set(value.getBytes(), index + 1, value.getLength() - (index + 1));
-        output.write(outputKey, outputValue);
-      }
-    }
-  }
-
-  public static class ReduceClass extends Reducer<Text,Text,Key,Value> {
-    @Override
-    public void reduce(Text key, Iterable<Text> values, Context output) throws IOException, InterruptedException {
-      // be careful with the timestamp... if you run on a cluster
-      // where the time is whacked you may not see your updates in
-      // accumulo if there is already an existing value with a later
-      // timestamp in accumulo... so make sure ntp is running on the
-      // cluster or consider using logical time... one option is
-      // to let accumulo set the time
-      long timestamp = System.currentTimeMillis();
-
-      int index = 0;
-      for (Text value : values) {
-        Key outputKey = new Key(key, new Text("colf"), new Text(String.format("col_%07d", index)), timestamp);
-        index++;
-
-        Value outputValue = new Value(value.getBytes(), 0, value.getLength());
-        output.write(outputKey, outputValue);
-      }
-    }
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--inputDir", required = true)
-    String inputDir;
-    @Parameter(names = "--workDir", required = true)
-    String workDir;
-  }
-
-  @Override
-  public int run(String[] args) {
-    Opts opts = new Opts();
-    opts.parseArgs(BulkIngestExample.class.getName(), args);
-
-    Configuration conf = getConf();
-    PrintStream out = null;
-    try {
-      Job job = Job.getInstance(conf);
-      job.setJobName("bulk ingest example");
-      job.setJarByClass(this.getClass());
-
-      job.setInputFormatClass(TextInputFormat.class);
-
-      job.setMapperClass(MapClass.class);
-      job.setMapOutputKeyClass(Text.class);
-      job.setMapOutputValueClass(Text.class);
-
-      job.setReducerClass(ReduceClass.class);
-      job.setOutputFormatClass(AccumuloFileOutputFormat.class);
-      opts.setAccumuloConfigs(job);
-
-      Connector connector = opts.getConnector();
-
-      TextInputFormat.setInputPaths(job, new Path(opts.inputDir));
-      AccumuloFileOutputFormat.setOutputPath(job, new Path(opts.workDir + "/files"));
-
-      FileSystem fs = FileSystem.get(conf);
-      out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.workDir + "/splits.txt"))));
-
-      Collection<Text> splits = connector.tableOperations().listSplits(opts.getTableName(), 100);
-      for (Text split : splits)
-        out.println(Base64.getEncoder().encodeToString(TextUtil.getBytes(split)));
-
-      job.setNumReduceTasks(splits.size() + 1);
-      out.close();
-
-      job.setPartitionerClass(RangePartitioner.class);
-      RangePartitioner.setSplitFile(job, opts.workDir + "/splits.txt");
-
-      job.waitForCompletion(true);
-      Path failures = new Path(opts.workDir, "failures");
-      fs.delete(failures, true);
-      fs.mkdirs(new Path(opts.workDir, "failures"));
-      // With HDFS permissions on, we need to make sure the Accumulo user can read/move the rfiles
-      FsShell fsShell = new FsShell(conf);
-      fsShell.run(new String[] {"-chmod", "-R", "777", opts.workDir});
-      connector.tableOperations().importDirectory(opts.getTableName(), opts.workDir + "/files", opts.workDir + "/failures", false);
-
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    } finally {
-      if (out != null)
-        out.close();
-    }
-
-    return 0;
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new BulkIngestExample(), args);
-    System.exit(res);
-  }
-}

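The reducer comment in the removed BulkIngestExample warns about wall-clock timestamps on clusters with skewed clocks and mentions letting Accumulo assign logical time instead. The sketch below shows that alternative at table-creation time; it is an illustration under the assumption that a Connector is already available, and the class, method, and table names are placeholders rather than part of the removed example.

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.accumulo.core.client.admin.TimeType;

    public class LogicalTimeSetup {
      // Create the target table with logical time so Accumulo orders updates itself,
      // avoiding the NTP concerns called out in the removed reducer comment.
      static void createWithLogicalTime(Connector conn, String tableName) throws Exception {
        conn.tableOperations().create(tableName, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
      }
    }
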
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
deleted file mode 100644
index 5cb4a0b..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/GenerateTestData.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce.bulk;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import com.beust.jcommander.Parameter;
-
-public class GenerateTestData {
-
-  static class Opts extends org.apache.accumulo.core.cli.Help {
-    @Parameter(names = "--start-row", required = true)
-    int startRow = 0;
-    @Parameter(names = "--count", required = true)
-    int numRows = 0;
-    @Parameter(names = "--output", required = true)
-    String outputFile;
-  }
-
-  public static void main(String[] args) throws IOException {
-    Opts opts = new Opts();
-    opts.parseArgs(GenerateTestData.class.getName(), args);
-
-    FileSystem fs = FileSystem.get(new Configuration());
-    PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(opts.outputFile))));
-
-    for (int i = 0; i < opts.numRows; i++) {
-      out.println(String.format("row_%010d\tvalue_%010d", i + opts.startRow, i + opts.startRow));
-    }
-    out.close();
-  }
-
-}

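For context, the generator above writes one tab-separated line per row in exactly the form the removed BulkIngestExample mapper splits on. With --start-row 0, the first two lines of output would be:

    row_0000000000	value_0000000000
    row_0000000001	value_0000000001
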
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
deleted file mode 100644
index 0fc3110..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/SetupTable.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce.bulk;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-public class SetupTable {
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(description = "<split> { <split> ... } ")
-    List<String> splits = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(SetupTable.class.getName(), args);
-    Connector conn = opts.getConnector();
-    conn.tableOperations().create(opts.getTableName());
-    if (!opts.splits.isEmpty()) {
-      // create a table with initial partitions
-      TreeSet<Text> initialPartitions = new TreeSet<>();
-      for (String split : opts.splits) {
-        initialPartitions.add(new Text(split));
-      }
-      conn.tableOperations().addSplits(opts.getTableName(), initialPartitions);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
deleted file mode 100644
index 16530cc..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/bulk/VerifyIngest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce.bulk;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-public class VerifyIngest {
-  private static final Logger log = LoggerFactory.getLogger(VerifyIngest.class);
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--start-row")
-    int startRow = 0;
-    @Parameter(names = "--count", required = true, description = "number of rows to verify")
-    int numRows = 0;
-  }
-
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    opts.parseArgs(VerifyIngest.class.getName(), args);
-
-    Connector connector = opts.getConnector();
-    Scanner scanner = connector.createScanner(opts.getTableName(), opts.auths);
-
-    scanner.setRange(new Range(new Text(String.format("row_%010d", opts.startRow)), null));
-
-    Iterator<Entry<Key,Value>> si = scanner.iterator();
-
-    boolean ok = true;
-
-    for (int i = opts.startRow; i < opts.numRows; i++) {
-
-      if (si.hasNext()) {
-        Entry<Key,Value> entry = si.next();
-
-        if (!entry.getKey().getRow().toString().equals(String.format("row_%010d", i))) {
-          log.error("unexpected row key " + entry.getKey().getRow().toString() + " expected " + String.format("row_%010d", i));
-          ok = false;
-        }
-
-        if (!entry.getValue().toString().equals(String.format("value_%010d", i))) {
-          log.error("unexpected value " + entry.getValue().toString() + " expected " + String.format("value_%010d", i));
-          ok = false;
-        }
-
-      } else {
-        log.error("no more rows, expected " + String.format("row_%010d", i));
-        ok = false;
-        break;
-      }
-
-    }
-
-    if (ok) {
-      System.out.println("OK");
-      System.exit(0);
-    } else {
-      System.exit(1);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
deleted file mode 100644
index eff8e21..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/reservations/ARS.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.reservations;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ConditionalWriter;
-import org.apache.accumulo.core.client.ConditionalWriter.Status;
-import org.apache.accumulo.core.client.ConditionalWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Condition;
-import org.apache.accumulo.core.data.ConditionalMutation;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import jline.console.ConsoleReader;
-
-/**
- * Accumulo Reservation System : An example reservation system using Accumulo. Supports atomic reservations of a resource at a date. Wait lists are also
- * supported. To keep the example simple, no checking is done of the date. Also, the code is inefficient; if interested in improving it, take a look at
- * the EXERCISE comments.
- */
-
-// EXERCISE create a test that verifies correctness under concurrency. For example, have M threads making reservations against N resources. Each thread could
-// randomly reserve and cancel resources for a single user. When each thread finishes, it knows what the state of its single user should be. When all threads
-// finish, collect their expected state and verify the status of all users and resources. For extra credit run the test on an IaaS provider using 10 nodes and
-// 10 threads per node.
-
-public class ARS {
-
-  private static final Logger log = LoggerFactory.getLogger(ARS.class);
-
-  private Connector conn;
-  private String rTable;
-
-  public enum ReservationResult {
-    RESERVED, WAIT_LISTED
-  }
-
-  public ARS(Connector conn, String rTable) {
-    this.conn = conn;
-    this.rTable = rTable;
-  }
-
-  public List<String> setCapacity(String what, String when, int count) {
-    // EXERCISE implement this method, which atomically sets a capacity and returns anyone who was moved to the wait list if the capacity was decreased
-
-    throw new UnsupportedOperationException();
-  }
-
-  public ReservationResult reserve(String what, String when, String who) throws Exception {
-
-    String row = what + ":" + when;
-
-    // EXERCISE This code assumes there is no reservation and tries to create one. If a reservation exists, the update will fail. This is a good strategy
-    // when it is expected there are usually no reservations. Could modify the code to scan first.
-
-    // The following mutation requires that the column tx:seq does not exist and will fail if it does.
-    ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
-    update.put("tx", "seq", "0");
-    update.put("res", String.format("%04d", 0), who);
-
-    ReservationResult result = ReservationResult.RESERVED;
-
-    // it is important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-        Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
-      while (true) {
-        Status status = cwriter.write(update).getStatus();
-        switch (status) {
-          case ACCEPTED:
-            return result;
-          case REJECTED:
-          case UNKNOWN:
-            // read the row and decide what to do
-            break;
-          default:
-            throw new RuntimeException("Unexpected status " + status);
-        }
-
-        // EXERCISE in the case of many threads trying to reserve a slot, this approach of immediately retrying is inefficient. Exponential back-off is a good
-        // general solution to contention problems like this. However, in this particular case, exponential back-off could penalize the earliest threads
-        // that attempted to make a reservation by putting them later in the list. A more complex solution could involve having independent sub-queues within
-        // the row that approximately maintain arrival order and use exponential back-off to fairly merge the sub-queues into the main queue.
-
-        scanner.setRange(new Range(row));
-
-        int seq = -1;
-        int maxReservation = -1;
-
-        for (Entry<Key,Value> entry : scanner) {
-          String cf = entry.getKey().getColumnFamilyData().toString();
-          String cq = entry.getKey().getColumnQualifierData().toString();
-          String val = entry.getValue().toString();
-
-          if (cf.equals("tx") && cq.equals("seq")) {
-            seq = Integer.parseInt(val);
-          } else if (cf.equals("res")) {
-            // EXERCISE scanning the entire list to find if the reserver is already in the list is inefficient. One possible way to solve this would be to sort the
-            // data differently in Accumulo so that finding the reserver could be done quickly.
-            if (val.equals(who))
-              if (maxReservation == -1)
-                return ReservationResult.RESERVED; // already have the first reservation
-              else
-                return ReservationResult.WAIT_LISTED; // already on wait list
-
-            // EXERCISE the way this code finds the max reservation is very inefficient... it would be better if it did not have to scan the entire row.
-            // One possibility is to just use the sequence number. Could also consider sorting the data in another way and/or using an iterator.
-            maxReservation = Integer.parseInt(cq);
-          }
-        }
-
-        Condition condition = new Condition("tx", "seq");
-        if (seq >= 0)
-          condition.setValue(seq + ""); // only expect a seq # if one was seen
-
-        update = new ConditionalMutation(row, condition);
-        update.put("tx", "seq", (seq + 1) + "");
-        update.put("res", String.format("%04d", maxReservation + 1), who);
-
-        // EXERCISE if setCapacity is implemented, then the result should take capacity into account
-        if (maxReservation == -1)
-          result = ReservationResult.RESERVED; // if successful, will be first reservation
-        else
-          result = ReservationResult.WAIT_LISTED;
-      }
-    }
-  }
-
-  public void cancel(String what, String when, String who) throws Exception {
-
-    String row = what + ":" + when;
-
-    // Even though this method is only deleting a column, it's important to use a conditional writer. By updating the seq # when deleting a reservation, it
-    // will cause any concurrent reservations to retry. If this delete were done using a batch writer, then a concurrent reservation could report WAIT_LISTED
-    // when it actually got the reservation.
-
-    // it's important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = conn.createConditionalWriter(rTable, new ConditionalWriterConfig());
-        Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
-      while (true) {
-        scanner.setRange(new Range(row));
-
-        int seq = -1;
-        String reservation = null;
-
-        for (Entry<Key,Value> entry : scanner) {
-          String cf = entry.getKey().getColumnFamilyData().toString();
-          String cq = entry.getKey().getColumnQualifierData().toString();
-          String val = entry.getValue().toString();
-
-          // EXERCISE avoid linear scan
-
-          if (cf.equals("tx") && cq.equals("seq")) {
-            seq = Integer.parseInt(val);
-          } else if (cf.equals("res") && val.equals(who)) {
-            reservation = cq;
-          }
-        }
-
-        if (reservation != null) {
-          ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq").setValue(seq + ""));
-          update.putDelete("res", reservation);
-          update.put("tx", "seq", (seq + 1) + "");
-
-          Status status = cwriter.write(update).getStatus();
-          switch (status) {
-            case ACCEPTED:
-              // successfully canceled reservation
-              return;
-            case REJECTED:
-            case UNKNOWN:
-              // retry
-              // EXERCISE exponential back-off could be used here
-              break;
-            default:
-              throw new RuntimeException("Unexpected status " + status);
-          }
-
-        } else {
-          // not reserved, nothing to do
-          break;
-        }
-
-      }
-    }
-  }
-
-  public List<String> list(String what, String when) throws Exception {
-    String row = what + ":" + when;
-
-    // it's important to use an isolated scanner so that only whole mutations are seen
-    try (Scanner scanner = new IsolatedScanner(conn.createScanner(rTable, Authorizations.EMPTY))) {
-      scanner.setRange(new Range(row));
-      scanner.fetchColumnFamily(new Text("res"));
-
-      List<String> reservations = new ArrayList<>();
-
-      for (Entry<Key,Value> entry : scanner) {
-        String val = entry.getValue().toString();
-        reservations.add(val);
-      }
-
-      return reservations;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    final ConsoleReader reader = new ConsoleReader();
-    ARS ars = null;
-
-    while (true) {
-      String line = reader.readLine(">");
-      if (line == null)
-        break;
-
-      final String[] tokens = line.split("\\s+");
-
-      if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
-        // start up multiple threads all trying to reserve the same resource, no more than one should succeed
-
-        final ARS fars = ars;
-        ArrayList<Thread> threads = new ArrayList<>();
-        for (int i = 3; i < tokens.length; i++) {
-          final int whoIndex = i;
-          Runnable reservationTask = new Runnable() {
-            @Override
-            public void run() {
-              try {
-                reader.println("  " + String.format("%20s", tokens[whoIndex]) + " : " + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
-              } catch (Exception e) {
-                log.warn("Could not write to the ConsoleReader.", e);
-              }
-            }
-          };
-
-          threads.add(new Thread(reservationTask));
-        }
-
-        for (Thread thread : threads)
-          thread.start();
-
-        for (Thread thread : threads)
-          thread.join();
-
-      } else if (tokens[0].equals("cancel") && tokens.length == 4 && ars != null) {
-        ars.cancel(tokens[1], tokens[2], tokens[3]);
-      } else if (tokens[0].equals("list") && tokens.length == 3 && ars != null) {
-        List<String> reservations = ars.list(tokens[1], tokens[2]);
-        if (reservations.size() > 0) {
-          reader.println("  Reservation holder : " + reservations.get(0));
-          if (reservations.size() > 1)
-            reader.println("  Wait list : " + reservations.subList(1, reservations.size()));
-        }
-      } else if (tokens[0].equals("quit") && tokens.length == 1) {
-        break;
-      } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
-        ZooKeeperInstance zki = new ZooKeeperInstance(new ClientConfiguration().withInstance(tokens[1]).withZkHosts(tokens[2]));
-        Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
-        if (conn.tableOperations().exists(tokens[5])) {
-          ars = new ARS(conn, tokens[5]);
-          reader.println("  connected");
-        } else
-          reader.println("  No Such Table");
-      } else {
-        System.out.println("  Commands : ");
-        if (ars == null) {
-          reader.println("    connect <instance> <zookeepers> <user> <pass> <table>");
-        } else {
-          reader.println("    reserve <what> <when> <who> {who}");
-          reader.println("    cancel <what> <when> <who>");
-          reader.println("    list <what> <when>");
-        }
-      }
-    }
-  }
-}

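To make the removed reservation scheme easier to follow: each resource/date pair is one row, the tx:seq column holds a sequence number that every successful conditional update bumps, and res:<zero-padded position> columns hold the current reservation holder followed by the wait list. A hypothetical row after three reservations might scan as follows (the resource, users, and values are illustrative only):

    room01:2013-07-11 res:0000 []    alice
    room01:2013-07-11 res:0001 []    bob
    room01:2013-07-11 res:0002 []    eve
    room01:2013-07-11 tx:seq []    2
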
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
deleted file mode 100644
index 262e63d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/sample/SampleExample.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.examples.simple.sample;
-
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.SampleNotPresentException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.CompactionConfig;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
-import org.apache.accumulo.core.client.sample.RowSampler;
-import org.apache.accumulo.core.client.sample.SamplerConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
-import org.apache.accumulo.examples.simple.shard.CutoffIntersectingIterator;
-
-import com.google.common.collect.ImmutableMap;
-
-/**
- * A simple example of using Accumulo's sampling feature. This example does something similar to what README.sample shows using the shell. Also see
- * {@link CutoffIntersectingIterator} and README.sample for an example of how to use sample data from within an iterator.
- */
-public class SampleExample {
-
-  // a compaction strategy that only selects files for compaction that have no sample data or sample data created differently than the table's current sampler configuration
-  static final CompactionStrategyConfig NO_SAMPLE_STRATEGY = new CompactionStrategyConfig(
-      "org.apache.accumulo.tserver.compaction.strategies.ConfigurableCompactionStrategy").setOptions(Collections.singletonMap("SF_NO_SAMPLE", ""));
-
-  static class Opts extends ClientOnDefaultTable {
-    public Opts() {
-      super("sampex");
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
-
-    Connector conn = opts.getConnector();
-
-    if (!conn.tableOperations().exists(opts.getTableName())) {
-      conn.tableOperations().create(opts.getTableName());
-    } else {
-      System.out.println("Table exists, not doing anything.");
-      return;
-    }
-
-    // write some data
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-    bw.addMutation(createMutation("9225", "abcde", "file://foo.txt"));
-    bw.addMutation(createMutation("8934", "accumulo scales", "file://accumulo_notes.txt"));
-    bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano", "file://groceries/9/txt"));
-    bw.addMutation(createMutation("3900", "EC2 ate my homework", "file://final_project.txt"));
-    bw.flush();
-
-    SamplerConfiguration sc1 = new SamplerConfiguration(RowSampler.class.getName());
-    sc1.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "3"));
-
-    conn.tableOperations().setSamplerConfiguration(opts.getTableName(), sc1);
-
-    Scanner scanner = conn.createScanner(opts.getTableName(), Authorizations.EMPTY);
-    System.out.println("Scanning all data :");
-    print(scanner);
-    System.out.println();
-
-    System.out.println("Scanning with sampler configuration.  Data was written before sampler was set on table, scan should fail.");
-    scanner.setSamplerConfiguration(sc1);
-    try {
-      print(scanner);
-    } catch (SampleNotPresentException e) {
-      System.out.println("  Saw sample not present exception as expected.");
-    }
-    System.out.println();
-
-    // compact table to recreate sample data
-    conn.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
-
-    System.out.println("Scanning after compaction (compaction should have created sample data) : ");
-    print(scanner);
-    System.out.println();
-
-    // update a document in the sample data
-    bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano, butter", "file://groceries/9/txt"));
-    bw.close();
-    System.out.println("Scanning sample after updating content for docId 2317 (should see content change in sample data) : ");
-    print(scanner);
-    System.out.println();
-
-    // change the table's sampling configuration...
-    SamplerConfiguration sc2 = new SamplerConfiguration(RowSampler.class.getName());
-    sc2.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "2"));
-    conn.tableOperations().setSamplerConfiguration(opts.getTableName(), sc2);
-    // compact table to recreate sample data using new configuration
-    conn.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
-
-    System.out.println("Scanning with old sampler configuration.  Sample data was created using new configuration with a compaction.  Scan should fail.");
-    try {
-      // try scanning with old sampler configuration
-      print(scanner);
-    } catch (SampleNotPresentException e) {
-      System.out.println("  Saw sample not present exception as expected ");
-    }
-    System.out.println();
-
-    // update expected sampler configuration on scanner
-    scanner.setSamplerConfiguration(sc2);
-
-    System.out.println("Scanning with new sampler configuration : ");
-    print(scanner);
-    System.out.println();
-
-  }
-
-  private static void print(Scanner scanner) {
-    for (Entry<Key,Value> entry : scanner) {
-      System.out.println("  " + entry.getKey() + " " + entry.getValue());
-    }
-  }
-
-  private static Mutation createMutation(String docId, String content, String url) {
-    Mutation m = new Mutation(docId);
-    m.put("doc", "context", content);
-    m.put("doc", "url", url);
-    return m;
-  }
-}
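
For anyone landing on this commit while looking for the sampling calls that are going away, here is a minimal sketch of the core client API usage. It assumes the same imports as the deleted SampleExample above, an existing Connector named conn, and an illustrative table name "sampex"; it is a sketch, not a replacement for the removed example.

    // Configure a row-based sampler on the table.
    SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName());
    sc.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "3"));
    conn.tableOperations().setSamplerConfiguration("sampex", sc);

    // Scan only the sampled rows. Files written before the sampler was set have no
    // sample data, so the scan can throw SampleNotPresentException until a compaction
    // rewrites them.
    Scanner scanner = conn.createScanner("sampex", Authorizations.EMPTY);
    scanner.setSamplerConfiguration(sc);
    try {
      for (Entry<Key,Value> entry : scanner) {
        System.out.println(entry.getKey() + " " + entry.getValue());
      }
    } catch (SampleNotPresentException e) {
      conn.tableOperations().compact("sampex", new CompactionConfig().setWait(true));
    }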

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
deleted file mode 100644
index 604c851..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/ContinuousQuery.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.shard;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.IntersectingIterator;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-import com.google.common.collect.Iterators;
-
-/**
- * Using the doc2word table created by Reverse.java, this program randomly selects N words per document. Then it continually queries a random set of words in
- * the shard table (created by {@link Index}) using the {@link IntersectingIterator}.
- *
- * See docs/examples/README.shard for instructions.
- */
-
-public class ContinuousQuery {
-
-  static class Opts extends ClientOpts {
-    @Parameter(names = "--shardTable", required = true, description = "name of the shard table")
-    String tableName = null;
-    @Parameter(names = "--doc2Term", required = true, description = "name of the doc2Term table")
-    String doc2Term;
-    @Parameter(names = "--terms", required = true, description = "the number of terms in the query")
-    int numTerms;
-    @Parameter(names = "--count", description = "the number of queries to run")
-    long iterations = Long.MAX_VALUE;
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchScannerOpts bsOpts = new BatchScannerOpts();
-    opts.parseArgs(ContinuousQuery.class.getName(), args, bsOpts);
-
-    Connector conn = opts.getConnector();
-
-    ArrayList<Text[]> randTerms = findRandomTerms(conn.createScanner(opts.doc2Term, opts.auths), opts.numTerms);
-
-    Random rand = new Random();
-
-    BatchScanner bs = conn.createBatchScanner(opts.tableName, opts.auths, bsOpts.scanThreads);
-    bs.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-
-    for (long i = 0; i < opts.iterations; i += 1) {
-      Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
-
-      bs.clearScanIterators();
-      bs.clearColumns();
-
-      IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
-      IntersectingIterator.setColumnFamilies(ii, columns);
-      bs.addScanIterator(ii);
-      bs.setRanges(Collections.singleton(new Range()));
-
-      long t1 = System.currentTimeMillis();
-      int count = Iterators.size(bs.iterator());
-      long t2 = System.currentTimeMillis();
-
-      System.out.printf("  %s %,d %6.3f%n", Arrays.asList(columns), count, (t2 - t1) / 1000.0);
-    }
-
-    bs.close();
-
-  }
-
-  private static ArrayList<Text[]> findRandomTerms(Scanner scanner, int numTerms) {
-
-    Text currentRow = null;
-
-    ArrayList<Text> words = new ArrayList<>();
-    ArrayList<Text[]> ret = new ArrayList<>();
-
-    Random rand = new Random();
-
-    for (Entry<Key,Value> entry : scanner) {
-      Key key = entry.getKey();
-
-      if (currentRow == null)
-        currentRow = key.getRow();
-
-      if (!currentRow.equals(key.getRow())) {
-        selectRandomWords(words, ret, rand, numTerms);
-        words.clear();
-        currentRow = key.getRow();
-      }
-
-      words.add(key.getColumnFamily());
-
-    }
-
-    selectRandomWords(words, ret, rand, numTerms);
-
-    return ret;
-  }
-
-  private static void selectRandomWords(ArrayList<Text> words, ArrayList<Text[]> ret, Random rand, int numTerms) {
-    if (words.size() >= numTerms) {
-      Collections.shuffle(words, rand);
-      Text docWords[] = new Text[numTerms];
-      for (int i = 0; i < docWords.length; i++) {
-        docWords[i] = words.get(i);
-      }
-
-      ret.add(docWords);
-    }
-  }
-}
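
The essential pattern in the class above is attaching an IntersectingIterator to a BatchScanner. A hedged sketch of just that step, assuming the same imports as the deleted file, an existing Connector conn, an illustrative shard table name, and a Text[] of query terms:

    // Find documents containing every term; terms are column families in the shard table.
    BatchScanner bs = conn.createBatchScanner("shard", Authorizations.EMPTY, 10);
    IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
    IntersectingIterator.setColumnFamilies(ii, terms);
    bs.addScanIterator(ii);
    bs.setRanges(Collections.singleton(new Range()));
    for (Entry<Key,Value> entry : bs) {
      System.out.println(entry.getKey().getColumnQualifier()); // matching document id
    }
    bs.close();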

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/CutoffIntersectingIterator.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/CutoffIntersectingIterator.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/CutoffIntersectingIterator.java
deleted file mode 100644
index f5dce1d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/shard/CutoffIntersectingIterator.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.examples.simple.shard;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.sample.RowColumnSampler;
-import org.apache.accumulo.core.client.sample.SamplerConfiguration;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.user.IntersectingIterator;
-
-/**
- * This iterator uses a sample built from the Column Qualifier to quickly avoid intersecting iterator queries that may return too many documents.
- */
-
-public class CutoffIntersectingIterator extends IntersectingIterator {
-
-  private IntersectingIterator sampleII;
-  private int sampleMax;
-  private boolean hasTop;
-
-  public static void setCutoff(IteratorSetting iterCfg, int cutoff) {
-    checkArgument(cutoff >= 0);
-    iterCfg.addOption("cutoff", cutoff + "");
-  }
-
-  @Override
-  public boolean hasTop() {
-    return hasTop && super.hasTop();
-  }
-
-  @Override
-  public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boolean inclusive) throws IOException {
-
-    sampleII.seek(range, seekColumnFamilies, inclusive);
-
-    // this check will be redone whenever the iterator stack is torn down and recreated.
-    int count = 0;
-    while (count <= sampleMax && sampleII.hasTop()) {
-      sampleII.next();
-      count++;
-    }
-
-    if (count > sampleMax) {
-      // In a real application, one would probably want to return a key/value pair that indicates too much data. Since this check executes for each tablet, some
-      // tablets may still return data; for tablets that did not return data, an indication would be useful.
-      hasTop = false;
-    } else {
-      hasTop = true;
-      super.seek(range, seekColumnFamilies, inclusive);
-    }
-  }
-
-  @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    super.init(source, options, env);
-
-    IteratorEnvironment sampleEnv = env.cloneWithSamplingEnabled();
-
-    setMax(sampleEnv, options);
-
-    SortedKeyValueIterator<Key,Value> sampleDC = source.deepCopy(sampleEnv);
-    sampleII = new IntersectingIterator();
-    sampleII.init(sampleDC, options, env);
-
-  }
-
-  static void validateSamplerConfig(SamplerConfiguration sampleConfig) {
-    requireNonNull(sampleConfig);
-    checkArgument(sampleConfig.getSamplerClassName().equals(RowColumnSampler.class.getName()), "Unexpected Sampler " + sampleConfig.getSamplerClassName());
-    checkArgument(sampleConfig.getOptions().get("qualifier").equals("true"), "Expected sample on column qualifier");
-    checkArgument(isNullOrFalse(sampleConfig.getOptions(), "row", "family", "visibility"), "Expected sample on column qualifier only");
-  }
-
-  private void setMax(IteratorEnvironment sampleEnv, Map<String,String> options) {
-    String cutoffValue = options.get("cutoff");
-    SamplerConfiguration sampleConfig = sampleEnv.getSamplerConfiguration();
-
-    // Ensure the sample was constructed in the expected way. If the sample was not built as expected, then no conclusions can be drawn from it.
-    requireNonNull(cutoffValue, "Expected cutoff option is missing");
-    validateSamplerConfig(sampleConfig);
-
-    int modulus = Integer.parseInt(sampleConfig.getOptions().get("modulus"));
-
-    sampleMax = Math.round(Float.parseFloat(cutoffValue) / modulus);
-  }
-
-  private static boolean isNullOrFalse(Map<String,String> options, String... keys) {
-    for (String key : keys) {
-      String val = options.get(key);
-      if (val != null && val.equals("true")) {
-        return false;
-      }
-    }
-    return true;
-  }
-}
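
From the client side, the cutoff is passed through the IteratorSetting, and validateSamplerConfig above requires the table's sample to be built on the column qualifier with a RowColumnSampler. A hedged sketch of the setup, assuming an existing BatchScanner bs over such a table and a Text[] of query terms:

    // Assumes the shard table's sampler is a RowColumnSampler with options such as
    // {"qualifier":"true", "hasher":"murmur3_32", "modulus":"101"}.
    IteratorSetting ii = new IteratorSetting(20, "ii", CutoffIntersectingIterator.class);
    IntersectingIterator.setColumnFamilies(ii, terms);
    CutoffIntersectingIterator.setCutoff(ii, 10000); // skip tablets whose sample suggests too many matches
    bs.addScanIterator(ii);
    bs.setRanges(Collections.singleton(new Range()));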


[4/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
deleted file mode 100644
index 2c76264..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/QueryUtil.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory
- * names. See docs/examples/README.dirlist for instructions.
- */
-public class QueryUtil {
-  private Connector conn = null;
-  private String tableName;
-  private Authorizations auths;
-  public static final Text DIR_COLF = new Text("dir");
-  public static final Text FORWARD_PREFIX = new Text("f");
-  public static final Text REVERSE_PREFIX = new Text("r");
-  public static final Text INDEX_COLF = new Text("i");
-  public static final Text COUNTS_COLQ = new Text("counts");
-
-  public QueryUtil(Opts opts) throws AccumuloException, AccumuloSecurityException {
-    conn = opts.getConnector();
-    this.tableName = opts.getTableName();
-    this.auths = opts.auths;
-  }
-
-  /**
-   * Calculates the depth of a path, i.e. the number of forward slashes in the path name.
-   *
-   * @param path
-   *          the full path of a file or directory
-   * @return the depth of the path
-   */
-  public static int getDepth(String path) {
-    int numSlashes = 0;
-    int index = -1;
-    while ((index = path.indexOf("/", index + 1)) >= 0)
-      numSlashes++;
-    return numSlashes;
-  }
-
-  /**
-   * Given a path, construct an accumulo row prepended with the path's depth for the directory table.
-   *
-   * @param path
-   *          the full path of a file or directory
-   * @return the accumulo row associated with this path
-   */
-  public static Text getRow(String path) {
-    Text row = new Text(String.format("%03d", getDepth(path)));
-    row.append(path.getBytes(), 0, path.length());
-    return row;
-  }
-
-  /**
-   * Given a path, construct an accumulo row prepended with the {@link #FORWARD_PREFIX} for the index table.
-   *
-   * @param path
-   *          the full path of a file or directory
-   * @return the accumulo row associated with this path
-   */
-  public static Text getForwardIndex(String path) {
-    String part = path.substring(path.lastIndexOf("/") + 1);
-    if (part.length() == 0)
-      return null;
-    Text row = new Text(FORWARD_PREFIX);
-    row.append(part.getBytes(), 0, part.length());
-    return row;
-  }
-
-  /**
-   * Given a path, construct an accumulo row prepended with the {@link #REVERSE_PREFIX} with the path reversed for the index table.
-   *
-   * @param path
-   *          the full path of a file or directory
-   * @return the accumulo row associated with this path
-   */
-  public static Text getReverseIndex(String path) {
-    String part = path.substring(path.lastIndexOf("/") + 1);
-    if (part.length() == 0)
-      return null;
-    byte[] rev = new byte[part.length()];
-    int i = part.length() - 1;
-    for (byte b : part.getBytes())
-      rev[i--] = b;
-    Text row = new Text(REVERSE_PREFIX);
-    row.append(rev, 0, rev.length);
-    return row;
-  }
-
-  /**
-   * Returns either the {@link #DIR_COLF} or a decoded string version of the colf.
-   *
-   * @param colf
-   *          the column family
-   */
-  public static String getType(Text colf) {
-    if (colf.equals(DIR_COLF))
-      return colf.toString() + ":";
-    return Long.toString(Ingest.encoder.decode(colf.getBytes())) + ":";
-  }
-
-  /**
-   * Scans over the directory table and pulls out stat information about a path.
-   *
-   * @param path
-   *          the full path of a file or directory
-   */
-  public Map<String,String> getData(String path) throws TableNotFoundException {
-    if (path.endsWith("/"))
-      path = path.substring(0, path.length() - 1);
-    Scanner scanner = conn.createScanner(tableName, auths);
-    scanner.setRange(new Range(getRow(path)));
-    Map<String,String> data = new TreeMap<>();
-    for (Entry<Key,Value> e : scanner) {
-      String type = getType(e.getKey().getColumnFamily());
-      data.put("fullname", e.getKey().getRow().toString().substring(3));
-      data.put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
-    }
-    return data;
-  }
-
-  /**
-   * Uses the directory table to list the contents of a directory.
-   *
-   * @param path
-   *          the full path of a directory
-   */
-  public Map<String,Map<String,String>> getDirList(String path) throws TableNotFoundException {
-    if (!path.endsWith("/"))
-      path = path + "/";
-    Map<String,Map<String,String>> fim = new TreeMap<>();
-    Scanner scanner = conn.createScanner(tableName, auths);
-    scanner.setRange(Range.prefix(getRow(path)));
-    for (Entry<Key,Value> e : scanner) {
-      String name = e.getKey().getRow().toString();
-      name = name.substring(name.lastIndexOf("/") + 1);
-      String type = getType(e.getKey().getColumnFamily());
-      if (!fim.containsKey(name)) {
-        fim.put(name, new TreeMap<String,String>());
-        fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
-      }
-      fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
-    }
-    return fim;
-  }
-
-  /**
-   * Scans over the index table for files or directories with a given name.
-   *
-   * @param term
-   *          the name of a file or directory to search for
-   */
-  public Iterable<Entry<Key,Value>> exactTermSearch(String term) throws Exception {
-    System.out.println("executing exactTermSearch for " + term);
-    Scanner scanner = conn.createScanner(tableName, auths);
-    scanner.setRange(new Range(getForwardIndex(term)));
-    return scanner;
-  }
-
-  /**
-   * Scans over the index table for files or directories with a given name, prefix, or suffix (indicated by a wildcard '*' at the beginning or end of the term).
-   *
-   * @param exp
-   *          the name of a file or directory to search for, with an optional wildcard '*' at the beginning or end
-   */
-  public Iterable<Entry<Key,Value>> singleRestrictedWildCardSearch(String exp) throws Exception {
-    if (exp.indexOf("/") >= 0)
-      throw new Exception("this method only works with unqualified names");
-
-    Scanner scanner = conn.createScanner(tableName, auths);
-    if (exp.startsWith("*")) {
-      System.out.println("executing beginning wildcard search for " + exp);
-      exp = exp.substring(1);
-      scanner.setRange(Range.prefix(getReverseIndex(exp)));
-    } else if (exp.endsWith("*")) {
-      System.out.println("executing ending wildcard search for " + exp);
-      exp = exp.substring(0, exp.length() - 1);
-      scanner.setRange(Range.prefix(getForwardIndex(exp)));
-    } else if (exp.indexOf("*") >= 0) {
-      throw new Exception("this method only works for beginning or ending wild cards");
-    } else {
-      return exactTermSearch(exp);
-    }
-    return scanner;
-  }
-
-  /**
-   * Scans over the index table for files or directories with a given name that can contain a single wildcard '*' anywhere in the term.
-   *
-   * @param exp
-   *          the name of a file or directory to search for, with one optional wildcard '*'
-   */
-  public Iterable<Entry<Key,Value>> singleWildCardSearch(String exp) throws Exception {
-    int starIndex = exp.indexOf("*");
-    if (exp.indexOf("*", starIndex + 1) >= 0)
-      throw new Exception("only one wild card for search");
-
-    if (starIndex < 0) {
-      return exactTermSearch(exp);
-    } else if (starIndex == 0 || starIndex == exp.length() - 1) {
-      return singleRestrictedWildCardSearch(exp);
-    }
-
-    String firstPart = exp.substring(0, starIndex);
-    String lastPart = exp.substring(starIndex + 1);
-    String regexString = ".*/" + exp.replace("*", "[^/]*");
-
-    Scanner scanner = conn.createScanner(tableName, auths);
-    if (firstPart.length() >= lastPart.length()) {
-      System.out.println("executing middle wildcard search for " + regexString + " from entries starting with " + firstPart);
-      scanner.setRange(Range.prefix(getForwardIndex(firstPart)));
-    } else {
-      System.out.println("executing middle wildcard search for " + regexString + " from entries ending with " + lastPart);
-      scanner.setRange(Range.prefix(getReverseIndex(lastPart)));
-    }
-    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
-    RegExFilter.setRegexs(regex, null, null, regexString, null, false);
-    scanner.addScanIterator(regex);
-    return scanner;
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--path", description = "the directory to list")
-    String path = "/";
-    @Parameter(names = "--search", description = "find a file or directory with the given name")
-    boolean search = false;
-  }
-
-  /**
-   * Lists the contents of a directory using the directory table, or searches for file or directory names (if the --search flag is included).
-   */
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(QueryUtil.class.getName(), args);
-    QueryUtil q = new QueryUtil(opts);
-    if (opts.search) {
-      for (Entry<Key,Value> e : q.singleWildCardSearch(opts.path)) {
-        System.out.println(e.getKey().getColumnQualifier());
-      }
-    } else {
-      for (Entry<String,Map<String,String>> e : q.getDirList(opts.path).entrySet()) {
-        System.out.println(e);
-      }
-    }
-  }
-}
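
As a quick reference for the lookup methods removed above, a minimal sketch of listing a directory and running a wildcard search. It assumes a QueryUtil q already built from parsed Opts (connection flags omitted) against the dirlist tables; the paths below are illustrative.

    // List a directory from the directory table.
    for (Entry<String,Map<String,String>> e : q.getDirList("/usr/local").entrySet()) {
      System.out.println(e); // name -> {fullname, per-column stat info}
    }

    // Search the index table for names matching a single wildcard expression.
    for (Entry<Key,Value> e : q.singleWildCardSearch("*.java")) {
      System.out.println(e.getKey().getColumnQualifier()); // printed the same way main() above does
    }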

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java
deleted file mode 100644
index 2648a48..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Viewer.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.awt.BorderLayout;
-import java.io.IOException;
-import java.util.Enumeration;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import javax.swing.JFrame;
-import javax.swing.JScrollPane;
-import javax.swing.JSplitPane;
-import javax.swing.JTextArea;
-import javax.swing.JTree;
-import javax.swing.event.TreeExpansionEvent;
-import javax.swing.event.TreeExpansionListener;
-import javax.swing.event.TreeSelectionEvent;
-import javax.swing.event.TreeSelectionListener;
-import javax.swing.tree.DefaultMutableTreeNode;
-import javax.swing.tree.DefaultTreeModel;
-import javax.swing.tree.TreePath;
-
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.examples.simple.filedata.FileDataQuery;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Provides a GUI for browsing the file system information stored in Accumulo. See docs/examples/README.dirlist for instructions.
- */
-@SuppressWarnings("serial")
-public class Viewer extends JFrame implements TreeSelectionListener, TreeExpansionListener {
-  private static final Logger log = LoggerFactory.getLogger(Viewer.class);
-
-  JTree tree;
-  DefaultTreeModel treeModel;
-  QueryUtil q;
-  FileDataQuery fdq;
-  String topPath;
-  Map<String,DefaultMutableTreeNode> nodeNameMap;
-  JTextArea text;
-  JTextArea data;
-  JScrollPane dataPane;
-
-  public static class NodeInfo {
-    private String name;
-    private Map<String,String> data;
-
-    public NodeInfo(String name, Map<String,String> data) {
-      this.name = name;
-      this.data = data;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public String getFullName() {
-      String fn = data.get("fullname");
-      if (fn == null)
-        return name;
-      return fn;
-    }
-
-    public Map<String,String> getData() {
-      return data;
-    }
-
-    @Override
-    public String toString() {
-      return getName();
-    }
-
-    public String getHash() {
-      for (String k : data.keySet()) {
-        String[] parts = k.split(":");
-        if (parts.length >= 2 && parts[1].equals("md5")) {
-          return data.get(k);
-        }
-      }
-      return null;
-    }
-  }
-
-  public Viewer(Opts opts) throws Exception {
-    super("File Viewer");
-    setSize(1000, 800);
-    setDefaultCloseOperation(EXIT_ON_CLOSE);
-    q = new QueryUtil(opts);
-    fdq = new FileDataQuery(opts.instance, opts.zookeepers, opts.getPrincipal(), opts.getToken(), opts.dataTable, opts.auths);
-    this.topPath = opts.path;
-  }
-
-  public void populate(DefaultMutableTreeNode node) throws TableNotFoundException {
-    String path = ((NodeInfo) node.getUserObject()).getFullName();
-    log.debug("listing " + path);
-    for (Entry<String,Map<String,String>> e : q.getDirList(path).entrySet()) {
-      log.debug("got child for " + node.getUserObject() + ": " + e.getKey());
-      node.add(new DefaultMutableTreeNode(new NodeInfo(e.getKey(), e.getValue())));
-    }
-  }
-
-  public void populateChildren(DefaultMutableTreeNode node) throws TableNotFoundException {
-    @SuppressWarnings("unchecked")
-    Enumeration<DefaultMutableTreeNode> children = node.children();
-    while (children.hasMoreElements()) {
-      populate(children.nextElement());
-    }
-  }
-
-  public void init() throws TableNotFoundException {
-    DefaultMutableTreeNode root = new DefaultMutableTreeNode(new NodeInfo(topPath, q.getData(topPath)));
-    populate(root);
-    populateChildren(root);
-
-    treeModel = new DefaultTreeModel(root);
-    tree = new JTree(treeModel);
-    tree.addTreeExpansionListener(this);
-    tree.addTreeSelectionListener(this);
-    text = new JTextArea(getText(q.getData(topPath)));
-    data = new JTextArea("");
-    JScrollPane treePane = new JScrollPane(tree);
-    JScrollPane textPane = new JScrollPane(text);
-    dataPane = new JScrollPane(data);
-    JSplitPane infoSplitPane = new JSplitPane(JSplitPane.VERTICAL_SPLIT, textPane, dataPane);
-    JSplitPane mainSplitPane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, treePane, infoSplitPane);
-    mainSplitPane.setDividerLocation(300);
-    infoSplitPane.setDividerLocation(150);
-    getContentPane().add(mainSplitPane, BorderLayout.CENTER);
-  }
-
-  public static String getText(DefaultMutableTreeNode node) {
-    return getText(((NodeInfo) node.getUserObject()).getData());
-  }
-
-  public static String getText(Map<String,String> data) {
-    StringBuilder sb = new StringBuilder();
-    for (String name : data.keySet()) {
-      sb.append(name);
-      sb.append(" : ");
-      sb.append(data.get(name));
-      sb.append('\n');
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public void treeExpanded(TreeExpansionEvent event) {
-    try {
-      populateChildren((DefaultMutableTreeNode) event.getPath().getLastPathComponent());
-    } catch (TableNotFoundException e) {
-      log.error("Could not find table.", e);
-    }
-  }
-
-  @Override
-  public void treeCollapsed(TreeExpansionEvent event) {
-    DefaultMutableTreeNode node = (DefaultMutableTreeNode) event.getPath().getLastPathComponent();
-    @SuppressWarnings("unchecked")
-    Enumeration<DefaultMutableTreeNode> children = node.children();
-    while (children.hasMoreElements()) {
-      DefaultMutableTreeNode child = children.nextElement();
-      log.debug("removing children of " + ((NodeInfo) child.getUserObject()).getFullName());
-      child.removeAllChildren();
-    }
-  }
-
-  @Override
-  public void valueChanged(TreeSelectionEvent e) {
-    TreePath selected = e.getNewLeadSelectionPath();
-    if (selected == null)
-      return;
-    DefaultMutableTreeNode node = (DefaultMutableTreeNode) selected.getLastPathComponent();
-    text.setText(getText(node));
-    try {
-      String hash = ((NodeInfo) node.getUserObject()).getHash();
-      if (hash != null) {
-        data.setText(fdq.getSomeData(hash, 10000));
-      } else {
-        data.setText("");
-      }
-    } catch (IOException e1) {
-      log.error("Could not get data from FileDataQuery.", e1);
-    }
-  }
-
-  static class Opts extends QueryUtil.Opts {
-    @Parameter(names = "--dataTable")
-    String dataTable = "dataTable";
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(Viewer.class.getName(), args);
-
-    Viewer v = new Viewer(opts);
-    v.init();
-    v.setVisible(true);
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
deleted file mode 100644
index 9e32090..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/CharacterHistogram.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A MapReduce job that computes a histogram of byte frequencies for each file and stores the histogram alongside the file data. The {@link ChunkInputFormat} is used
- * to read the file data from Accumulo. See docs/examples/README.filedata for instructions.
- */
-public class CharacterHistogram extends Configured implements Tool {
-  public static final String VIS = "vis";
-
-  public static void main(String[] args) throws Exception {
-    System.exit(ToolRunner.run(new Configuration(), new CharacterHistogram(), args));
-  }
-
-  public static class HistMapper extends Mapper<List<Entry<Key,Value>>,InputStream,Text,Mutation> {
-    private ColumnVisibility cv;
-
-    @Override
-    public void map(List<Entry<Key,Value>> k, InputStream v, Context context) throws IOException, InterruptedException {
-      Long[] hist = new Long[256];
-      for (int i = 0; i < hist.length; i++)
-        hist[i] = 0L;
-      int b = v.read();
-      while (b >= 0) {
-        hist[b] += 1L;
-        b = v.read();
-      }
-      v.close();
-      Mutation m = new Mutation(k.get(0).getKey().getRow());
-      m.put("info", "hist", cv, new Value(SummingArrayCombiner.STRING_ARRAY_ENCODER.encode(Arrays.asList(hist))));
-      context.write(new Text(), m);
-    }
-
-    @Override
-    protected void setup(Context context) throws IOException, InterruptedException {
-      cv = new ColumnVisibility(context.getConfiguration().get(VIS, ""));
-    }
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--vis")
-    String visibilities = "";
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Job job = Job.getInstance(getConf());
-    job.setJobName(this.getClass().getSimpleName());
-    job.setJarByClass(this.getClass());
-
-    Opts opts = new Opts();
-    opts.parseArgs(CharacterHistogram.class.getName(), args);
-
-    job.setInputFormatClass(ChunkInputFormat.class);
-    opts.setAccumuloConfigs(job);
-    job.getConfiguration().set(VIS, opts.visibilities.toString());
-
-    job.setMapperClass(HistMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Mutation.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkCombiner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkCombiner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkCombiner.java
deleted file mode 100644
index 0ffeca0..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkCombiner.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.hadoop.io.Text;
-
-/**
- * This iterator dedupes chunks and sets their visibilities to the combined visibility of the refs columns. For example, it would combine
- *
- * <pre>
- *    row1 refs uid1\0a A&amp;B V0
- *    row1 refs uid2\0b C&amp;D V0
- *    row1 ~chunk 0 A&amp;B V1
- *    row1 ~chunk 0 C&amp;D V1
- *    row1 ~chunk 0 E&amp;F V1
- *    row1 ~chunk 0 G&amp;H V1
- * </pre>
- *
- * into the following
- *
- * <pre>
- *    row1 refs uid1\0a A&amp;B V0
- *    row1 refs uid2\0b C&amp;D V0
- *    row1 ~chunk 0 (A&amp;B)|(C&amp;D) V1
- * </pre>
- *
- * {@link VisibilityCombiner} is used to combine the visibilities.
- */
-
-public class ChunkCombiner implements SortedKeyValueIterator<Key,Value> {
-
-  private SortedKeyValueIterator<Key,Value> source;
-  private SortedKeyValueIterator<Key,Value> refsSource;
-  private static final Collection<ByteSequence> refsColf = Collections.singleton(FileDataIngest.REFS_CF_BS);
-  private Map<Text,byte[]> lastRowVC = Collections.emptyMap();
-
-  private Key topKey = null;
-  private Value topValue = null;
-
-  public ChunkCombiner() {}
-
-  @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    this.source = source;
-    this.refsSource = source.deepCopy(env);
-  }
-
-  @Override
-  public boolean hasTop() {
-    return topKey != null;
-  }
-
-  @Override
-  public void next() throws IOException {
-    findTop();
-  }
-
-  @Override
-  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-    source.seek(range, columnFamilies, inclusive);
-    findTop();
-  }
-
-  private void findTop() throws IOException {
-    do {
-      topKey = null;
-      topValue = null;
-    } while (source.hasTop() && _findTop() == null);
-  }
-
-  private byte[] _findTop() throws IOException {
-    long maxTS;
-
-    topKey = new Key(source.getTopKey());
-    topValue = new Value(source.getTopValue());
-    source.next();
-
-    if (!topKey.getColumnFamilyData().equals(FileDataIngest.CHUNK_CF_BS))
-      return topKey.getColumnVisibility().getBytes();
-
-    maxTS = topKey.getTimestamp();
-
-    while (source.hasTop() && source.getTopKey().equals(topKey, PartialKey.ROW_COLFAM_COLQUAL)) {
-      if (source.getTopKey().getTimestamp() > maxTS)
-        maxTS = source.getTopKey().getTimestamp();
-
-      if (!topValue.equals(source.getTopValue()))
-        throw new RuntimeException("values not equals " + topKey + " " + source.getTopKey() + " : " + diffInfo(topValue, source.getTopValue()));
-
-      source.next();
-    }
-
-    byte[] vis = getVisFromRefs();
-    if (vis != null) {
-      topKey = new Key(topKey.getRowData().toArray(), topKey.getColumnFamilyData().toArray(), topKey.getColumnQualifierData().toArray(), vis, maxTS);
-    }
-    return vis;
-  }
-
-  private byte[] getVisFromRefs() throws IOException {
-    Text row = topKey.getRow();
-    if (lastRowVC.containsKey(row))
-      return lastRowVC.get(row);
-    Range range = new Range(row);
-    refsSource.seek(range, refsColf, true);
-    VisibilityCombiner vc = null;
-    while (refsSource.hasTop()) {
-      if (vc == null)
-        vc = new VisibilityCombiner();
-      vc.add(refsSource.getTopKey().getColumnVisibilityData());
-      refsSource.next();
-    }
-    if (vc == null) {
-      lastRowVC = Collections.singletonMap(row, null);
-      return null;
-    }
-    lastRowVC = Collections.singletonMap(row, vc.get());
-    return vc.get();
-  }
-
-  private String diffInfo(Value v1, Value v2) {
-    if (v1.getSize() != v2.getSize()) {
-      return "val len not equal " + v1.getSize() + "!=" + v2.getSize();
-    }
-
-    byte[] vb1 = v1.get();
-    byte[] vb2 = v2.get();
-
-    for (int i = 0; i < vb1.length; i++) {
-      if (vb1[i] != vb2[i]) {
-        return String.format("first diff at offset %,d 0x%02x != 0x%02x", i, 0xff & vb1[i], 0xff & vb2[i]);
-      }
-    }
-
-    return null;
-  }
-
-  @Override
-  public Key getTopKey() {
-    return topKey;
-  }
-
-  @Override
-  public Value getTopValue() {
-    return topValue;
-  }
-
-  @Override
-  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
-    ChunkCombiner cc = new ChunkCombiner();
-    try {
-      cc.init(source.deepCopy(env), null, env);
-    } catch (IOException e) {
-      throw new IllegalArgumentException(e);
-    }
-    return cc;
-  }
-}
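
ChunkCombiner is meant to be attached as an iterator on the file-data table so that scans see a single de-duplicated chunk with the combined visibility. A hedged sketch of attaching it to an (assumed) table named "dataTable" for all scopes:

    conn.tableOperations().attachIterator("dataTable",
        new IteratorSetting(20, "chunkcombiner", ChunkCombiner.class));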

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormat.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormat.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormat.java
deleted file mode 100644
index bb7715b..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputFormat.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.mapreduce.InputFormatBase;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.accumulo.core.util.format.DefaultFormatter;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.RecordReader;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-
-/**
- * An InputFormat that turns the file data ingested with {@link FileDataIngest} into an InputStream using {@link ChunkInputStream}. Mappers used with this
- * InputFormat must close the InputStream.
- */
-public class ChunkInputFormat extends InputFormatBase<List<Entry<Key,Value>>,InputStream> {
-  @Override
-  public RecordReader<List<Entry<Key,Value>>,InputStream> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException,
-      InterruptedException {
-    return new RecordReaderBase<List<Entry<Key,Value>>,InputStream>() {
-      private PeekingIterator<Entry<Key,Value>> peekingScannerIterator;
-
-      @Override
-      public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
-        super.initialize(inSplit, attempt);
-        peekingScannerIterator = new PeekingIterator<>(scannerIterator);
-        currentK = new ArrayList<>();
-        currentV = new ChunkInputStream();
-      }
-
-      @Override
-      public boolean nextKeyValue() throws IOException, InterruptedException {
-        currentK.clear();
-        if (peekingScannerIterator.hasNext()) {
-          ++numKeysRead;
-          Entry<Key,Value> entry = peekingScannerIterator.peek();
-          while (!entry.getKey().getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
-            currentK.add(entry);
-            peekingScannerIterator.next();
-            if (!peekingScannerIterator.hasNext())
-              return true;
-            entry = peekingScannerIterator.peek();
-          }
-          currentKey = entry.getKey();
-          ((ChunkInputStream) currentV).setSource(peekingScannerIterator);
-          if (log.isTraceEnabled())
-            log.trace("Processing key/value pair: " + DefaultFormatter.formatEntry(entry, true));
-          return true;
-        }
-        return false;
-      }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStream.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStream.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStream.java
deleted file mode 100644
index 1774227..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/ChunkInputStream.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.PeekingIterator;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * An input stream that reads file data stored in one or more Accumulo values. Used by {@link ChunkInputFormat} to present input streams to a mapper.
- */
-public class ChunkInputStream extends InputStream {
-  private static final Logger log = LoggerFactory.getLogger(ChunkInputStream.class);
-
-  protected PeekingIterator<Entry<Key,Value>> source;
-  protected Key currentKey;
-  protected Set<Text> currentVis;
-  protected int currentChunk;
-  protected int currentChunkSize;
-  protected boolean gotEndMarker;
-
-  protected byte buf[];
-  protected int count;
-  protected int pos;
-
-  public ChunkInputStream() {
-    source = null;
-  }
-
-  public ChunkInputStream(PeekingIterator<Entry<Key,Value>> in) throws IOException {
-    setSource(in);
-  }
-
-  public void setSource(PeekingIterator<Entry<Key,Value>> in) throws IOException {
-    if (source != null)
-      throw new IOException("setting new source without closing old one");
-    this.source = in;
-    currentVis = new TreeSet<>();
-    count = pos = 0;
-    if (!source.hasNext()) {
-      log.debug("source has no next");
-      gotEndMarker = true;
-      return;
-    }
-
-    // read forward until we reach a chunk
-    Entry<Key,Value> entry = source.next();
-    currentKey = entry.getKey();
-    buf = entry.getValue().get();
-    while (!currentKey.getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
-      log.debug("skipping key: " + currentKey.toString());
-      if (!source.hasNext())
-        return;
-      entry = source.next();
-      currentKey = entry.getKey();
-      buf = entry.getValue().get();
-    }
-    log.debug("starting chunk: " + currentKey.toString());
-    count = buf.length;
-    currentVis.add(currentKey.getColumnVisibility());
-    currentChunk = FileDataIngest.bytesToInt(currentKey.getColumnQualifier().getBytes(), 4);
-    currentChunkSize = FileDataIngest.bytesToInt(currentKey.getColumnQualifier().getBytes(), 0);
-    gotEndMarker = false;
-    if (buf.length == 0)
-      gotEndMarker = true;
-    if (currentChunk != 0) {
-      source = null;
-      throw new IOException("starting chunk number isn't 0 for " + currentKey.getRow());
-    }
-  }
-
-  private int fill() throws IOException {
-    if (source == null || !source.hasNext()) {
-      if (gotEndMarker)
-        return count = pos = 0;
-      else
-        throw new IOException("no end chunk marker but source has no data");
-    }
-
-    Entry<Key,Value> entry = source.peek();
-    Key thisKey = entry.getKey();
-    log.debug("evaluating key: " + thisKey.toString());
-
-    // check that we're still on the same row
-    if (!thisKey.equals(currentKey, PartialKey.ROW)) {
-      if (gotEndMarker)
-        return -1;
-      else {
-        String currentRow = currentKey.getRow().toString();
-        clear();
-        throw new IOException("got to the end of the row without end chunk marker " + currentRow);
-      }
-    }
-    log.debug("matches current key");
-
-    // ok to advance the iterator
-    source.next();
-
-    // check that this is part of a chunk
-    if (!thisKey.getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
-      log.debug("skipping non-chunk key");
-      return fill();
-    }
-    log.debug("is a chunk");
-
-    // check that the chunk size is the same as the one being read
-    if (currentChunkSize != FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 0)) {
-      log.debug("skipping chunk of different size");
-      return fill();
-    }
-
-    // add the visibility to the list if it's not there
-    if (!currentVis.contains(thisKey.getColumnVisibility()))
-      currentVis.add(thisKey.getColumnVisibility());
-
-    // check to see if it is an identical chunk with a different visibility
-    if (thisKey.getColumnQualifier().equals(currentKey.getColumnQualifier())) {
-      log.debug("skipping identical chunk with different visibility");
-      return fill();
-    }
-
-    if (gotEndMarker) {
-      log.debug("got another chunk after end marker: " + currentKey.toString() + " " + thisKey.toString());
-      clear();
-      throw new IOException("found extra chunk after end marker");
-    }
-
-    // got new chunk of the same file, check that it's the next chunk
-    int thisChunk = FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 4);
-    if (thisChunk != currentChunk + 1) {
-      log.debug("new chunk same file, unexpected chunkID: " + currentKey.toString() + " " + thisKey.toString());
-      clear();
-      throw new IOException("missing chunks between " + currentChunk + " and " + thisChunk);
-    }
-
-    currentKey = thisKey;
-    currentChunk = thisChunk;
-    buf = entry.getValue().get();
-    pos = 0;
-
-    // check to see if it's the last chunk
-    if (buf.length == 0) {
-      gotEndMarker = true;
-      return fill();
-    }
-
-    return count = buf.length;
-  }
-
-  public Set<Text> getVisibilities() {
-    if (source != null)
-      throw new IllegalStateException("don't get visibilities before chunks have been completely read");
-    return currentVis;
-  }
-
-  @Override
-  public int read() throws IOException {
-    if (source == null)
-      return -1;
-    log.debug("pos: " + pos + " count: " + count);
-    if (pos >= count) {
-      if (fill() <= 0) {
-        log.debug("done reading input stream at key: " + (currentKey == null ? "null" : currentKey.toString()));
-        if (source != null && source.hasNext())
-          log.debug("next key: " + source.peek().getKey());
-        clear();
-        return -1;
-      }
-    }
-    return buf[pos++] & 0xff;
-  }
-
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    if (b == null) {
-      throw new NullPointerException();
-    } else if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length) || ((off + len) < 0)) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return 0;
-    }
-
-    log.debug("filling buffer " + off + " " + len);
-    int total = 0;
-    while (total < len) {
-      int avail = count - pos;
-      log.debug(avail + " available in current local buffer");
-      if (avail <= 0) {
-        if (fill() <= 0) {
-          log.debug("done reading input stream at key: " + (currentKey == null ? "null" : currentKey.toString()));
-          if (source != null && source.hasNext())
-            log.debug("next key: " + source.peek().getKey());
-          clear();
-          log.debug("filled " + total + " bytes");
-          return total == 0 ? -1 : total;
-        }
-        avail = count - pos;
-      }
-
-      int cnt = (avail < len - total) ? avail : len - total;
-      log.debug("copying from local buffer: local pos " + pos + " into pos " + off + " len " + cnt);
-      System.arraycopy(buf, pos, b, off, cnt);
-      pos += cnt;
-      off += cnt;
-      total += cnt;
-    }
-    log.debug("filled " + total + " bytes");
-    return total;
-  }
-
-  public void clear() {
-    source = null;
-    buf = null;
-    currentKey = null;
-    currentChunk = 0;
-    pos = count = 0;
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      while (fill() > 0) {}
-    } catch (IOException e) {
-      clear();
-      throw new IOException(e);
-    }
-    clear();
-  }
-}
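
For reference, ChunkInputStream (deleted above) is fed by a PeekingIterator over the
entries of a single file's row; fill() skips non-chunk entries on its own. A minimal usage
sketch, assuming a Connector conn, Authorizations auths, a placeholder table name
"dataTable", and the file's MD5 hash string are already in scope (error handling omitted):

    Scanner scanner = conn.createScanner("dataTable", auths);
    scanner.setRange(new Range(hash));                 // one row per stored file
    ChunkInputStream in = new ChunkInputStream();
    in.setSource(new PeekingIterator<>(scanner.iterator()));
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    int b;
    while ((b = in.read()) >= 0) {                     // reassembles the chunks in order
      out.write(b);
    }
    in.close();
    byte[] fileBytes = out.toByteArray();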

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
deleted file mode 100644
index 1a0ec5d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataIngest.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.data.ArrayByteSequence;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Takes a list of files and archives them into Accumulo keyed on hashes of the files. See docs/examples/README.filedata for instructions.
- */
-public class FileDataIngest {
-  public static final Text CHUNK_CF = new Text("~chunk");
-  public static final Text REFS_CF = new Text("refs");
-  public static final String REFS_ORIG_FILE = "name";
-  public static final String REFS_FILE_EXT = "filext";
-  public static final ByteSequence CHUNK_CF_BS = new ArrayByteSequence(CHUNK_CF.getBytes(), 0, CHUNK_CF.getLength());
-  public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0, REFS_CF.getLength());
-
-  int chunkSize;
-  byte[] chunkSizeBytes;
-  byte[] buf;
-  MessageDigest md5digest;
-  ColumnVisibility cv;
-
-  public FileDataIngest(int chunkSize, ColumnVisibility colvis) {
-    this.chunkSize = chunkSize;
-    chunkSizeBytes = intToBytes(chunkSize);
-    buf = new byte[chunkSize];
-    try {
-      md5digest = MessageDigest.getInstance("MD5");
-    } catch (NoSuchAlgorithmException e) {
-      throw new RuntimeException(e);
-    }
-    cv = colvis;
-  }
-
-  public String insertFileData(String filename, BatchWriter bw) throws MutationsRejectedException, IOException {
-    if (chunkSize == 0)
-      return "";
-    md5digest.reset();
-    String uid = hexString(md5digest.digest(filename.getBytes()));
-
-    // read through file once, calculating hashes
-    md5digest.reset();
-    InputStream fis = null;
-    int numRead = 0;
-    try {
-      fis = new FileInputStream(filename);
-      numRead = fis.read(buf);
-      while (numRead >= 0) {
-        if (numRead > 0) {
-          md5digest.update(buf, 0, numRead);
-        }
-        numRead = fis.read(buf);
-      }
-    } finally {
-      if (fis != null) {
-        fis.close();
-      }
-    }
-
-    String hash = hexString(md5digest.digest());
-    Text row = new Text(hash);
-
-    // write info to accumulo
-    Mutation m = new Mutation(row);
-    m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_ORIG_FILE), cv, new Value(filename.getBytes()));
-    String fext = getExt(filename);
-    if (fext != null)
-      m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_FILE_EXT), cv, new Value(fext.getBytes()));
-    bw.addMutation(m);
-
-    // read through file again, writing chunks to accumulo
-    int chunkCount = 0;
-    try {
-      fis = new FileInputStream(filename);
-      numRead = fis.read(buf);
-      while (numRead >= 0) {
-        while (numRead < buf.length) {
-          int moreRead = fis.read(buf, numRead, buf.length - numRead);
-          if (moreRead > 0)
-            numRead += moreRead;
-          else if (moreRead < 0)
-            break;
-        }
-        m = new Mutation(row);
-        Text chunkCQ = new Text(chunkSizeBytes);
-        chunkCQ.append(intToBytes(chunkCount), 0, 4);
-        m.put(CHUNK_CF, chunkCQ, cv, new Value(buf, 0, numRead));
-        bw.addMutation(m);
-        if (chunkCount == Integer.MAX_VALUE)
-          throw new RuntimeException("too many chunks for file " + filename + ", try raising chunk size");
-        chunkCount++;
-        numRead = fis.read(buf);
-      }
-    } finally {
-      if (fis != null) {
-        fis.close();
-      }
-    }
-    m = new Mutation(row);
-    Text chunkCQ = new Text(chunkSizeBytes);
-    chunkCQ.append(intToBytes(chunkCount), 0, 4);
-    m.put(new Text(CHUNK_CF), chunkCQ, cv, new Value(new byte[0]));
-    bw.addMutation(m);
-    return hash;
-  }
-
-  public static int bytesToInt(byte[] b, int offset) {
-    if (b.length <= offset + 3)
-      throw new NumberFormatException("couldn't pull integer from bytes at offset " + offset);
-    int i = (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8) + ((b[offset + 3] & 255) << 0));
-    return i;
-  }
-
-  public static byte[] intToBytes(int l) {
-    byte[] b = new byte[4];
-    b[0] = (byte) (l >>> 24);
-    b[1] = (byte) (l >>> 16);
-    b[2] = (byte) (l >>> 8);
-    b[3] = (byte) (l >>> 0);
-    return b;
-  }
-
-  private static String getExt(String filename) {
-    if (filename.indexOf(".") == -1)
-      return null;
-    return filename.substring(filename.lastIndexOf(".") + 1);
-  }
-
-  public String hexString(byte[] bytes) {
-    StringBuilder sb = new StringBuilder();
-    for (byte b : bytes) {
-      sb.append(String.format("%02x", b));
-    }
-    return sb.toString();
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility();
-
-    @Parameter(names = "--chunk", description = "size of the chunks used to store partial files")
-    int chunkSize = 64 * 1024;
-
-    @Parameter(description = "<file> { <file> ... }")
-    List<String> files = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
-
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.getTableName())) {
-      conn.tableOperations().create(opts.getTableName());
-      conn.tableOperations().attachIterator(opts.getTableName(), new IteratorSetting(1, ChunkCombiner.class));
-    }
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
-    for (String filename : opts.files) {
-      fdi.insertFileData(filename, bw);
-    }
-    bw.close();
-    opts.stopTracing();
-  }
-}
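
For reference, the deleted FileDataIngest stores each chunk under the ~chunk column family
with an 8-byte column qualifier: four bytes of chunk size followed by four bytes of chunk
index, both encoded with intToBytes. A small round-trip sketch (values chosen only for
illustration):

    int chunkSize = 64 * 1024;
    int chunkIndex = 3;
    Text cq = new Text(FileDataIngest.intToBytes(chunkSize));
    cq.append(FileDataIngest.intToBytes(chunkIndex), 0, 4);

    // ChunkInputStream and ChunkCombiner decode the qualifier the same way:
    int size  = FileDataIngest.bytesToInt(cq.getBytes(), 0);   // 65536
    int index = FileDataIngest.bytesToInt(cq.getBytes(), 4);   // 3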

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
deleted file mode 100644
index 48746d0..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/FileDataQuery.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.PeekingIterator;
-
-/**
- * Retrieves file data based on the hash of the file. Used by the {@link org.apache.accumulo.examples.simple.dirlist.Viewer}. See README.dirlist for
- * instructions.
- */
-public class FileDataQuery {
-  private Connector conn = null;
-  List<Entry<Key,Value>> lastRefs;
-  private ChunkInputStream cis;
-  Scanner scanner;
-
-  public FileDataQuery(String instanceName, String zooKeepers, String user, AuthenticationToken token, String tableName, Authorizations auths)
-      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zooKeepers));
-    conn = instance.getConnector(user, token);
-    lastRefs = new ArrayList<>();
-    cis = new ChunkInputStream();
-    scanner = conn.createScanner(tableName, auths);
-  }
-
-  public List<Entry<Key,Value>> getLastRefs() {
-    return lastRefs;
-  }
-
-  public ChunkInputStream getData(String hash) throws IOException {
-    scanner.setRange(new Range(hash));
-    scanner.setBatchSize(1);
-    lastRefs.clear();
-    PeekingIterator<Entry<Key,Value>> pi = new PeekingIterator<>(scanner.iterator());
-    if (pi.hasNext()) {
-      while (!pi.peek().getKey().getColumnFamily().equals(FileDataIngest.CHUNK_CF)) {
-        lastRefs.add(pi.peek());
-        pi.next();
-      }
-    }
-    cis.clear();
-    cis.setSource(pi);
-    return cis;
-  }
-
-  public String getSomeData(String hash, int numBytes) throws IOException {
-    ChunkInputStream is = getData(hash);
-    byte[] buf = new byte[numBytes];
-    int len = is.read(buf);
-    if (len >= 0) {
-      return new String(buf, 0, len);
-    } else {
-      return "";
-    }
-  }
-}
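
A short usage sketch for the deleted FileDataQuery; the instance, credential, and table
names below are placeholders, and hash is the MD5 hex string under which FileDataIngest
stored the file:

    FileDataQuery query = new FileDataQuery("instance", "zookeepers", "username",
        new PasswordToken("password"), "dataTable", new Authorizations());
    String preview = query.getSomeData(hash, 100);        // up to the first 100 bytes as a String
    List<Entry<Key,Value>> refs = query.getLastRefs();    // refs entries seen before the chunks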

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
deleted file mode 100644
index f9c52ba..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/KeyUtil.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.io.Text;
-
-/**
- * A utility for creating and parsing null-byte separated strings into/from Text objects.
- */
-public class KeyUtil {
-  public static final byte[] nullbyte = new byte[] {0};
-
-  /**
-   * Join some number of strings using a null byte separator into a text object.
-   *
-   * @param s
-   *          strings
-   * @return a text object containing the strings separated by null bytes
-   */
-  public static Text buildNullSepText(String... s) {
-    Text t = new Text(s[0]);
-    for (int i = 1; i < s.length; i++) {
-      t.append(nullbyte, 0, 1);
-      t.append(s[i].getBytes(), 0, s[i].length());
-    }
-    return t;
-  }
-
-  /**
-   * Split a text object using a null byte separator into an array of strings.
-   *
-   * @param t
-   *          null-byte separated text object
-   * @return an array of strings
-   */
-  public static String[] splitNullSepText(Text t) {
-    ArrayList<String> s = new ArrayList<>();
-    byte[] b = t.getBytes();
-    int lastindex = 0;
-    for (int i = 0; i < t.getLength(); i++) {
-      if (b[i] == (byte) 0) {
-        s.add(new String(b, lastindex, i - lastindex));
-        lastindex = i + 1;
-      }
-    }
-    s.add(new String(b, lastindex, t.getLength() - lastindex));
-    return s.toArray(new String[s.size()]);
-  }
-}
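
The deleted KeyUtil is what FileDataIngest uses to pack several strings into one column
qualifier. A quick round-trip illustration:

    Text cq = KeyUtil.buildNullSepText("someUid", "name");   // parts joined by a null byte
    String[] parts = KeyUtil.splitNullSepText(cq);
    // parts[0] equals "someUid" and parts[1] equals "name"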

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
deleted file mode 100644
index b205ec1..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/filedata/VisibilityCombiner.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.filedata;
-
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.data.ByteSequence;
-
-/**
- * A utility for merging visibilities into the form {@code (VIS1)|(VIS2)|...|(VISN)}. Used by the {@link ChunkCombiner}.
- */
-public class VisibilityCombiner {
-
-  private TreeSet<String> visibilities = new TreeSet<>();
-
-  void add(ByteSequence cv) {
-    if (cv.length() == 0)
-      return;
-
-    int depth = 0;
-    int offset = 0;
-
-    for (int i = 0; i < cv.length(); i++) {
-      switch (cv.byteAt(i)) {
-        case '(':
-          depth++;
-          break;
-        case ')':
-          depth--;
-          if (depth < 0)
-            throw new IllegalArgumentException("Invalid vis " + cv);
-          break;
-        case '|':
-          if (depth == 0) {
-            insert(cv.subSequence(offset, i));
-            offset = i + 1;
-          }
-
-          break;
-      }
-    }
-
-    insert(cv.subSequence(offset, cv.length()));
-
-    if (depth != 0)
-      throw new IllegalArgumentException("Invalid vis " + cv);
-
-  }
-
-  private void insert(ByteSequence cv) {
-    String cvs = cv.toString();
-
-    if (cvs.charAt(0) != '(')
-      cvs = "(" + cvs + ")";
-    else {
-      int depth = 0;
-      int depthZeroCloses = 0;
-      for (int i = 0; i < cv.length(); i++) {
-        switch (cv.byteAt(i)) {
-          case '(':
-            depth++;
-            break;
-          case ')':
-            depth--;
-            if (depth == 0)
-              depthZeroCloses++;
-            break;
-        }
-      }
-
-      if (depthZeroCloses > 1)
-        cvs = "(" + cvs + ")";
-    }
-
-    visibilities.add(cvs);
-  }
-
-  byte[] get() {
-    StringBuilder sb = new StringBuilder();
-    String sep = "";
-    for (String cvs : visibilities) {
-      sb.append(sep);
-      sep = "|";
-      sb.append(cvs);
-    }
-
-    return sb.toString().getBytes();
-  }
-}
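
The deleted VisibilityCombiner merges individual visibilities into a single OR'd expression.
Its add/get methods are package-private, so the sketch below assumes it is called from code
in the same package (as ChunkCombiner does):

    VisibilityCombiner vc = new VisibilityCombiner();
    vc.add(new ArrayByteSequence("A&B".getBytes()));
    vc.add(new ArrayByteSequence("C".getBytes()));
    vc.add(new ArrayByteSequence("A&B".getBytes()));   // duplicates collapse in the TreeSet
    byte[] merged = vc.get();                          // "(A&B)|(C)"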

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
deleted file mode 100644
index 0e60086..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/InsertWithBatchWriter.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.helloworld;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MultiTableBatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-
-/**
- * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
- */
-public class InsertWithBatchWriter {
-
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
-      TableNotFoundException {
-    ClientOnRequiredTable opts = new ClientOnRequiredTable();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);
-
-    Connector connector = opts.getConnector();
-    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
-
-    if (!connector.tableOperations().exists(opts.getTableName()))
-      connector.tableOperations().create(opts.getTableName());
-    BatchWriter bw = mtbw.getBatchWriter(opts.getTableName());
-
-    Text colf = new Text("colfam");
-    System.out.println("writing ...");
-    for (int i = 0; i < 10000; i++) {
-      Mutation m = new Mutation(new Text(String.format("row_%d", i)));
-      for (int j = 0; j < 5; j++) {
-        m.put(colf, new Text(String.format("colqual_%d", j)), new Value((String.format("value_%d_%d", i, j)).getBytes()));
-      }
-      bw.addMutation(m);
-      if (i % 100 == 0)
-        System.out.println(i);
-    }
-    mtbw.close();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
deleted file mode 100644
index c8c984d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/helloworld/ReadData.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.helloworld;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Reads all data between two rows; all data after a given row; or all data in a table, depending on the number of arguments given.
- */
-public class ReadData {
-
-  private static final Logger log = LoggerFactory.getLogger(ReadData.class);
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--startKey")
-    String startKey;
-    @Parameter(names = "--endKey")
-    String endKey;
-  }
-
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    opts.parseArgs(ReadData.class.getName(), args, scanOpts);
-
-    Connector connector = opts.getConnector();
-
-    Scanner scan = connector.createScanner(opts.getTableName(), opts.auths);
-    scan.setBatchSize(scanOpts.scanBatchSize);
-    Key start = null;
-    if (opts.startKey != null)
-      start = new Key(new Text(opts.startKey));
-    Key end = null;
-    if (opts.endKey != null)
-      end = new Key(new Text(opts.endKey));
-    scan.setRange(new Range(start, end));
-    Iterator<Entry<Key,Value>> iter = scan.iterator();
-
-    while (iter.hasNext()) {
-      Entry<Key,Value> e = iter.next();
-      Text colf = e.getKey().getColumnFamily();
-      Text colq = e.getKey().getColumnQualifier();
-      log.trace("row: " + e.getKey().getRow() + ", colf: " + colf + ", colq: " + colq);
-      log.trace(", value: " + e.getValue().toString());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
deleted file mode 100644
index a2afcdf..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/isolation/InterferenceTest.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.isolation;
-
-import java.util.HashSet;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * This example shows how a concurrent reader and writer can interfere with each other. It creates two threads that run forever reading and writing to the same
- * table.
- *
- * When the example is run with isolation enabled, no interference will be observed.
- *
- * When the example is run without isolation, the reader will see partial mutations of a row.
- *
- */
-
-public class InterferenceTest {
-
-  private static final int NUM_ROWS = 500;
-  private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num columns not a multiple of 10
-  private static final Logger log = LoggerFactory.getLogger(InterferenceTest.class);
-
-  static class Writer implements Runnable {
-
-    private final BatchWriter bw;
-    private final long iterations;
-
-    Writer(BatchWriter bw, long iterations) {
-      this.bw = bw;
-      this.iterations = iterations;
-    }
-
-    @Override
-    public void run() {
-      int row = 0;
-      int value = 0;
-
-      for (long i = 0; i < iterations; i++) {
-        Mutation m = new Mutation(new Text(String.format("%03d", row)));
-        row = (row + 1) % NUM_ROWS;
-
-        for (int cq = 0; cq < NUM_COLUMNS; cq++)
-          m.put(new Text("000"), new Text(String.format("%04d", cq)), new Value(("" + value).getBytes()));
-
-        value++;
-
-        try {
-          bw.addMutation(m);
-        } catch (MutationsRejectedException e) {
-          log.error("Mutation was rejected.", e);
-          System.exit(-1);
-        }
-      }
-      try {
-        bw.close();
-      } catch (MutationsRejectedException e) {
-        log.error("Mutation was rejected on BatchWriter close.", e);
-      }
-    }
-  }
-
-  static class Reader implements Runnable {
-
-    private Scanner scanner;
-    volatile boolean stop = false;
-
-    Reader(Scanner scanner) {
-      this.scanner = scanner;
-    }
-
-    @Override
-    public void run() {
-      while (!stop) {
-        ByteSequence row = null;
-        int count = 0;
-
-        // all columns in a row should have the same value,
-        // use this hash set to track that
-        HashSet<String> values = new HashSet<>();
-
-        for (Entry<Key,Value> entry : scanner) {
-          if (row == null)
-            row = entry.getKey().getRowData();
-
-          if (!row.equals(entry.getKey().getRowData())) {
-            if (count != NUM_COLUMNS)
-              System.err.println("ERROR Did not see " + NUM_COLUMNS + " columns in row " + row);
-
-            if (values.size() > 1)
-              System.err.println("ERROR Columns in row " + row + " had multiple values " + values);
-
-            row = entry.getKey().getRowData();
-            count = 0;
-            values.clear();
-          }
-
-          count++;
-
-          values.add(entry.getValue().toString());
-        }
-
-        if (count > 0 && count != NUM_COLUMNS)
-          System.err.println("ERROR Did not see " + NUM_COLUMNS + " columns in row " + row);
-
-        if (values.size() > 1)
-          System.err.println("ERROR Columns in row " + row + " had multiple values " + values);
-      }
-    }
-
-    public void stopNow() {
-      stop = true;
-    }
-  }
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--iterations", description = "number of times to run", required = true)
-    long iterations = 0;
-    @Parameter(names = "--isolated", description = "use isolated scans")
-    boolean isolated = false;
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(InterferenceTest.class.getName(), args, bwOpts);
-
-    if (opts.iterations < 1)
-      opts.iterations = Long.MAX_VALUE;
-
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.getTableName()))
-      conn.tableOperations().create(opts.getTableName());
-
-    Thread writer = new Thread(new Writer(conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
-    writer.start();
-    Reader r;
-    if (opts.isolated)
-      r = new Reader(new IsolatedScanner(conn.createScanner(opts.getTableName(), opts.auths)));
-    else
-      r = new Reader(conn.createScanner(opts.getTableName(), opts.auths));
-    Thread reader;
-    reader = new Thread(r);
-    reader.start();
-    writer.join();
-    r.stopNow();
-    reader.join();
-    System.out.println("finished");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
deleted file mode 100644
index 3355454..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/NGramIngest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Map job to ingest n-gram files from http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
- */
-public class NGramIngest extends Configured implements Tool {
-
-  private static final Logger log = LoggerFactory.getLogger(NGramIngest.class);
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--input", required = true)
-    String inputDirectory;
-  }
-
-  static class NGramMapper extends Mapper<LongWritable,Text,Text,Mutation> {
-
-    @Override
-    protected void map(LongWritable location, Text value, Context context) throws IOException, InterruptedException {
-      String parts[] = value.toString().split("\\t");
-      if (parts.length >= 4) {
-        Mutation m = new Mutation(parts[0]);
-        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])), new Value(parts[3].trim().getBytes()));
-        context.write(null, m);
-      }
-    }
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(getClass().getName(), args);
-
-    Job job = Job.getInstance(getConf());
-    job.setJobName(getClass().getSimpleName());
-    job.setJarByClass(getClass());
-
-    opts.setAccumuloConfigs(job);
-    job.setInputFormatClass(TextInputFormat.class);
-    job.setOutputFormatClass(AccumuloOutputFormat.class);
-
-    job.setMapperClass(NGramMapper.class);
-    job.setMapOutputKeyClass(Text.class);
-    job.setMapOutputValueClass(Mutation.class);
-
-    job.setNumReduceTasks(0);
-    job.setSpeculativeExecution(false);
-
-    if (!opts.getConnector().tableOperations().exists(opts.getTableName())) {
-      log.info("Creating table " + opts.getTableName());
-      opts.getConnector().tableOperations().create(opts.getTableName());
-      SortedSet<Text> splits = new TreeSet<>();
-      String numbers[] = "1 2 3 4 5 6 7 8 9".split("\\s");
-      String lower[] = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split("\\s");
-      String upper[] = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z".split("\\s");
-      for (String[] array : new String[][] {numbers, lower, upper}) {
-        for (String s : array) {
-          splits.add(new Text(s));
-        }
-      }
-      opts.getConnector().tableOperations().addSplits(opts.getTableName(), splits);
-    }
-
-    TextInputFormat.addInputPath(job, new Path(opts.inputDirectory));
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new NGramIngest(), args);
-    if (res != 0)
-      System.exit(res);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
deleted file mode 100644
index ad5bc74..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/mapreduce/RegexExample.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.accumulo.core.cli.MapReduceClientOnRequiredTable;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.beust.jcommander.Parameter;
-
-public class RegexExample extends Configured implements Tool {
-  public static class RegexMapper extends Mapper<Key,Value,Key,Value> {
-    @Override
-    public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
-      context.write(row, data);
-    }
-  }
-
-  static class Opts extends MapReduceClientOnRequiredTable {
-    @Parameter(names = "--rowRegex")
-    String rowRegex;
-    @Parameter(names = "--columnFamilyRegex")
-    String columnFamilyRegex;
-    @Parameter(names = "--columnQualifierRegex")
-    String columnQualifierRegex;
-    @Parameter(names = "--valueRegex")
-    String valueRegex;
-    @Parameter(names = "--output", required = true)
-    String destination;
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    Opts opts = new Opts();
-    opts.parseArgs(getClass().getName(), args);
-
-    Job job = Job.getInstance(getConf());
-    job.setJobName(getClass().getSimpleName());
-    job.setJarByClass(getClass());
-
-    job.setInputFormatClass(AccumuloInputFormat.class);
-    opts.setAccumuloConfigs(job);
-
-    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
-    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex, opts.valueRegex, false);
-    AccumuloInputFormat.addIterator(job, regex);
-
-    job.setMapperClass(RegexMapper.class);
-    job.setMapOutputKeyClass(Key.class);
-    job.setMapOutputValueClass(Value.class);
-
-    job.setNumReduceTasks(0);
-
-    job.setOutputFormatClass(TextOutputFormat.class);
-    TextOutputFormat.setOutputPath(job, new Path(opts.destination));
-
-    System.out.println("setRowRegex: " + opts.rowRegex);
-    System.out.println("setColumnFamilyRegex: " + opts.columnFamilyRegex);
-    System.out.println("setColumnQualifierRegex: " + opts.columnQualifierRegex);
-    System.out.println("setValueRegex: " + opts.valueRegex);
-
-    job.waitForCompletion(true);
-    return job.isSuccessful() ? 0 : 1;
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new RegexExample(), args);
-    if (res != 0)
-      System.exit(res);
-  }
-}
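
The RegExFilter configured by the deleted RegexExample can also be applied to an interactive
Scanner without MapReduce. A minimal sketch, assuming a Connector conn and a table named
"mytable" already exist:

    IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
    // row regex only; null means "do not filter" on the other fields
    RegExFilter.setRegexs(regex, "row_00.*", null, null, null, false);

    Scanner scanner = conn.createScanner("mytable", Authorizations.EMPTY);
    scanner.addScanIterator(regex);
    for (Map.Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }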


[6/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

Posted by mw...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/filter.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/filter.md b/docs/src/main/resources/examples/filter.md
deleted file mode 100644
index 563e247..0000000
--- a/docs/src/main/resources/examples/filter.md
+++ /dev/null
@@ -1,112 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Filter Example
----
-
-This is a simple filter example. It uses the AgeOffFilter that is provided as
-part of the core package org.apache.accumulo.core.iterators.user. Filters are
-iterators that select desired key/value pairs (or weed out undesired ones).
-Filters extend the org.apache.accumulo.core.iterators.Filter class
-and must implement a method accept(Key k, Value v). This method returns true
-if the key/value pair is to be delivered and false if it is to be ignored.
-Filter takes a "negate" parameter which defaults to false. If set to true, the
-return value of the accept method is negated, so that key/value pairs accepted
-by the method are omitted by the Filter.
-
-    username@instance> createtable filtertest
-    username@instance filtertest> setiter -t filtertest -scan -p 10 -n myfilter -ageoff
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> scan
-    username@instance filtertest> insert foo a b c
-    username@instance filtertest> scan
-    foo a:b []    c
-    username@instance filtertest>
-
-... wait 30 seconds ...
-
-    username@instance filtertest> scan
-    username@instance filtertest>
-
-Note the absence of the entry inserted more than 30 seconds ago. Since the
-scope was set to "scan", this means the entry is still in Accumulo, but is
-being filtered out at query time. To delete entries from Accumulo based on
-the ages of their timestamps, AgeOffFilters should be set up for the "minc"
-and "majc" scopes, as well.
-
-To force an ageoff of the persisted data, after setting up the ageoff iterator
-on the "minc" and "majc" scopes you can flush and compact your table. This will
-happen automatically as a background operation on any table that is being
-actively written to, but can also be requested in the shell.
-
-The first setiter command used the special -ageoff flag to specify the
-AgeOffFilter, but any Filter can be configured by using the -class flag. The
-following commands show how to enable the AgeOffFilter for the minc and majc
-scopes using the -class flag, then flush and compact the table.
-
-    username@instance filtertest> setiter -t filtertest -minc -majc -p 10 -n myfilter -class org.apache.accumulo.core.iterators.user.AgeOffFilter
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> flush
-    06 10:42:24,806 [shell.Shell] INFO : Flush of table filtertest initiated...
-    username@instance filtertest> compact
-    06 10:42:36,781 [shell.Shell] INFO : Compaction of table filtertest started for given range
-    username@instance filtertest> flush -t filtertest -w
-    06 10:42:52,881 [shell.Shell] INFO : Flush of table filtertest completed.
-    username@instance filtertest> compact -t filtertest -w
-    06 10:43:00,632 [shell.Shell] INFO : Compacting table ...
-    06 10:43:01,307 [shell.Shell] INFO : Compaction of table filtertest completed for given range
-    username@instance filtertest>
-
-By default, flush and compact execute in the background, but with the -w flag
-they will wait to return until the operation has completed. Both are
-demonstrated above, though only one call to each would be necessary. A
-specific table can be specified with -t.
-
-After the compaction runs, the newly created files will not contain any data
-that should have been aged off, and the Accumulo garbage collector will remove
-the old files.
-
-To see the iterator settings for a table, use config.
-
-    username@instance filtertest> config -t filtertest -f iterator
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    SCOPE    | NAME                                        | VALUE
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    table    | table.iterator.majc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.majc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.majc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.majc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.minc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.minc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.minc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.minc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.scan.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.scan.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.scan.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.scan.vers.opt.maxVersions .. | 1
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    username@instance filtertest>
-
-When setting new iterators, make sure to order their priority numbers
-(specified with -p) in the order you would like the iterators to be applied.
-Also, each iterator must have a unique name and priority within each scope.
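
As the deleted filter.md notes, any Filter subclass can be plugged in with the -class flag.
A minimal custom filter needs little more than an accept method; the class and package names
below are made up for illustration:

    package org.example.iterators;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.Filter;

    /** Keeps only entries whose value is non-empty. */
    public class NonEmptyValueFilter extends Filter {
      @Override
      public boolean accept(Key k, Value v) {
        return v.getSize() > 0;
      }
    }

It would then be enabled with setiter ... -class org.example.iterators.NonEmptyValueFilter,
in the same way the AgeOffFilter was configured above.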

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/helloworld.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/helloworld.md b/docs/src/main/resources/examples/helloworld.md
deleted file mode 100644
index bc9f04b..0000000
--- a/docs/src/main/resources/examples/helloworld.md
+++ /dev/null
@@ -1,49 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Hello World Example
----
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.helloworld in the examples-simple module:
-
- * InsertWithBatchWriter.java - Inserts 10K rows (50K entries) into accumulo with each row having 5 entries
- * ReadData.java - Reads all data between two rows
-
-Log into the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-
-Create a table called 'hellotable':
-
-    username@instance> createtable hellotable
-
-Launch a Java program that inserts data with a BatchWriter:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter -i instance -z zookeepers -u username -p password -t hellotable
-
-On the accumulo status page at the URL below (where 'master' is replaced with the name or IP of your accumulo master), you should see 50K entries
-
-    http://master:9995/
-
-To view the entries, use the shell to scan the table:
-
-    username@instance> table hellotable
-    username@instance hellotable> scan
-
-You can also use a Java class to scan the table:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData -i instance -z zookeepers -u username -p password -t hellotable --startKey row_0 --endKey row_1001

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/index.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/index.md b/docs/src/main/resources/examples/index.md
deleted file mode 100644
index efb55f6..0000000
--- a/docs/src/main/resources/examples/index.md
+++ /dev/null
@@ -1,100 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Examples
----
-
-## Setup instructions
-
-Before running any of the examples, the following steps must be performed.
-
-1. Install and run Accumulo via the instructions found in INSTALL.md.
-   Remember the instance name. It will be referred to as "instance" throughout
-   the examples. A comma-separated list of zookeeper servers will be referred
-   to as "zookeepers".
-
-2. Create an Accumulo user (for help see the 'User Administration' section of the 
-   [user manual][manual]), or use the root user. This user and their password
-   should replace any reference to "username" or "password" in the examples. This
-   user needs the ability to create tables.
-
-In all commands, you will need to replace "instance", "zookeepers",
-"username", and "password" with the values you set for your Accumulo instance.
-
-Commands intended to be run in bash are prefixed by '$'. These are always
-assumed to be run from the root of your Accumulo installation.
-
-Commands intended to be run in the Accumulo shell are prefixed by '>'.
-
-## Accumulo Examples
-
-Each example below highlights a feature of Apache Accumulo.
-
-| Accumulo Example | Description |
-|------------------|-------------|
-| [batch] | Using the batch writer and batch scanner |
-| [bloom] | Creating a bloom filter enabled table to increase query performance |
-| [bulkIngest] | Ingesting bulk data using map/reduce jobs on Hadoop |
-| [classpath] | Using per-table classpaths |
-| [client] | Using table operations, reading and writing data in Java. |
-| [combiner] | Using example StatsCombiner to find min, max, sum, and count. |
-| [compactionStrategy] | Configuring a compaction strategy |
-| [constraints] | Using constraints with tables. |
-| [dirlist] | Storing filesystem information. |
-| [export] | Exporting and importing tables. |
-| [filedata] | Storing file data. |
-| [filter] | Using the AgeOffFilter to remove records more than 30 seconds old. |
-| [helloworld] | Inserting records both inside and outside map/reduce jobs, and reading records between two rows. |
-| [isolation] | Using the isolated scanner to ensure partial changes are not seen. |
-| [mapred] | Using MapReduce to read from and write to Accumulo tables. |
-| [maxmutation] | Limiting mutation size to avoid running out of memory. |
-| [regex] | Using MapReduce and Accumulo to find data using regular expressions. |
-| [reservations] | Using conditional mutations to implement a simple reservation system. |
-| [rgbalancer] | Using a balancer to spread groups of tablets within a table evenly |
-| [rowhash] | Using MapReduce to read a table and write to a new column in the same table. |
-| [sample] | Building and using sample data in Accumulo. |
-| [shard] | Using the intersecting iterator with a term index partitioned by document. |
-| [tabletofile] | Using MapReduce to read a table and write one of its columns to a file in HDFS. |
-| [terasort] | Generating random data and sorting it using Accumulo. |
-| [visibility] | Using visibilities (or combinations of authorizations). Also shows user permissions. |
-
-[manual]: https://accumulo.apache.org/latest/accumulo_user_manual/
-[batch]: batch.md
-[bloom]: bloom.md
-[bulkIngest]: bulkIngest.md
-[classpath]: classpath.md
-[client]: client.md 
-[combiner]: combiner.md
-[compactionStrategy]: compactionStrategy.md
-[constraints]: constraints.md
-[dirlist]: dirlist.md
-[export]: export.md
-[filedata]: filedata.md
-[filter]: filter.md
-[helloworld]: helloworld.md
-[isolation]: isolation.md
-[mapred]: mapred.md
-[maxmutation]: maxmutation.md
-[regex]: regex.md
-[reservations]: reservations.md
-[rgbalancer]: rgbalancer.md
-[rowhash]: rowhash.md
-[sample]: sample.md
-[shard]: shard.md
-[tabletofile]: tabletofile.md
-[terasort]: terasort.md
-[visibility]: visibility.md

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/isolation.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/isolation.md b/docs/src/main/resources/examples/isolation.md
deleted file mode 100644
index 9b4e0af..0000000
--- a/docs/src/main/resources/examples/isolation.md
+++ /dev/null
@@ -1,51 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Isolation Example
----
-
-Accumulo has an isolated scanner that ensures partial changes to rows are not
-seen. Isolation is documented in ../docs/isolation.html and the user manual.
-
-InterferenceTest is a simple example that shows the effects of scanning with
-and without isolation. This program starts two threads. One threads
-continually upates all of the values in a row to be the same thing, but
-different from what it used to be. The other thread continually scans the
-table and checks that all values in a row are the same. Without isolation the
-scanning thread will sometimes see different values, which is the result of
-reading the row at the same time a mutation is changing the row.
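-
-The same behavior is available directly from the client API by wrapping an
-ordinary Scanner in an IsolatedScanner. A minimal sketch, assuming an existing
-Connector named conn and the isotest table used below:
-
-    import java.util.Map.Entry;
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IsolatedScanner;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.client.TableNotFoundException;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    static void scanIsolated(Connector conn) throws TableNotFoundException {
-      // a plain scanner may observe a row while another client is still mutating it
-      Scanner plain = conn.createScanner("isotest", Authorizations.EMPTY);
-      // wrapping it buffers each row, so partial updates to a row are never returned
-      Scanner isolated = new IsolatedScanner(plain);
-      for (Entry<Key,Value> entry : isolated)
-        System.out.println(entry.getKey() + " " + entry.getValue());
-    }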
-
-Below, Interference Test is run without isolation enabled for 5000 iterations
-and it reports problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000
-    ERROR Columns in row 053 had multiple values [53, 4553]
-    ERROR Columns in row 061 had multiple values [561, 61]
-    ERROR Columns in row 070 had multiple values [570, 1070]
-    ERROR Columns in row 079 had multiple values [1079, 1579]
-    ERROR Columns in row 088 had multiple values [2588, 1588]
-    ERROR Columns in row 106 had multiple values [2606, 3106]
-    ERROR Columns in row 115 had multiple values [4615, 3115]
-    finished
-
-Below, Interference Test is run with isolation enabled for 5000 iterations and
-it reports no problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000 --isolated
-    finished
-
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/mapred.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/mapred.md b/docs/src/main/resources/examples/mapred.md
deleted file mode 100644
index e1a49eb..0000000
--- a/docs/src/main/resources/examples/mapred.md
+++ /dev/null
@@ -1,156 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo MapReduce Example
----
-
-This example uses mapreduce and accumulo to compute word counts for a set of
-documents. This is accomplished using a map-only mapreduce job and an
-accumulo table with combiners.
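-
-At its core, WordCount is a map-only job whose mapper emits one Mutation per
-word with a value of 1; the SummingCombiner configured below adds the 1s
-together at scan and compaction time. A simplified sketch of such a mapper
-(not the shipped code; the date-like qualifier and the wordCount table name
-are taken from the output shown later in this example):
-
-    import java.io.IOException;
-
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.hadoop.io.LongWritable;
-    import org.apache.hadoop.io.Text;
-    import org.apache.hadoop.mapreduce.Mapper;
-
-    public class WordCountMapper extends Mapper<LongWritable,Text,Text,Mutation> {
-      private static final Text TABLE = new Text("wordCount");
-
-      @Override
-      public void map(LongWritable key, Text line, Context context) throws IOException, InterruptedException {
-        for (String word : line.toString().split("\\s+")) {
-          if (word.isEmpty())
-            continue;
-          // row = word, family = count, value = "1"; the combiner sums the 1s
-          Mutation m = new Mutation(word);
-          m.put("count", "20080906", new Value("1".getBytes()));
-          // the job's AccumuloOutputFormat writes the mutation to the named table
-          context.write(TABLE, m);
-        }
-      }
-    }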
-
-To run this example you will need a directory in HDFS containing text files.
-The accumulo readme will be used to show how to run this example.
-
-    $ hadoop fs -copyFromLocal /path/to/accumulo/README.md /user/username/wc/Accumulo.README
-    $ hadoop fs -ls /user/username/wc
-    Found 1 items
-    -rw-r--r--   2 username supergroup       9359 2009-07-15 17:54 /user/username/wc/Accumulo.README
-
-The first part of running this example is to create a table with a combiner
-for the column family count.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable wordCount
-    username@instance wordCount> setiter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -t wordCount -majc -minc -scan
-    SummingCombiner interprets Values as Longs and adds them together. A variety of encodings (variable length, fixed length, or string) are available
-    ----------> set SummingCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.: false
-    ----------> set SummingCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non-alphanum chars using %<hex>.: count
-    ----------> set SummingCombiner parameter lossy, if true, failed decodes are ignored. Otherwise combiner will error on failed decodes (default false): <TRUE|FALSE>: false
-    ----------> set SummingCombiner parameter type, <VARLEN|FIXEDLEN|STRING|fullClassName>: STRING
-    username@instance wordCount> quit
-
-After creating the table, run the word count map reduce job.
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
-
-    11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
-    11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
-    11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
-    11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
-    11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
-    11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
-    11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters
-    11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
-    11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
-    11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
-    11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
-
-After the map reduce job completes, query the accumulo table to see word
-counts.
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> table wordCount
-    username@instance wordCount> scan -b the
-    the count:20080906 []    75
-    their count:20080906 []    2
-    them count:20080906 []    1
-    then count:20080906 []    1
-    there count:20080906 []    1
-    these count:20080906 []    3
-    this count:20080906 []    6
-    through count:20080906 []    1
-    time count:20080906 []    3
-    time. count:20080906 []    1
-    to count:20080906 []    27
-    total count:20080906 []    1
-    tserver, count:20080906 []    1
-    tserver.compaction.major.concurrent.max count:20080906 []    1
-    ...
-
-Another example to look at is
-org.apache.accumulo.examples.simple.mapreduce.UniqueColumns. This example
-computes the unique set of columns in a table and shows how a map reduce job
-can directly read a table's files from HDFS.
-
-One more example available is
-org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount.
-The TokenFileWordCount example works exactly the same as the WordCount example
-explained above except that it uses a token file rather than giving the
-password directly to the map-reduce job (this avoids having the password
-displayed in the job's configuration which is world-readable).
-
-To create a token file, use the create-token utility
-
-    $ ./bin/accumulo create-token
-
-It defaults to creating a PasswordToken, but you can specify the token class
-with -tc (requires the fully qualified class name). Based on the token class,
-it will prompt you for each property required to create the token.
-
-The last value it prompts for is a local filename to save to. If this file
-exists, it will append the new token to the end. Multiple tokens can exist in
-a file, but only the first one for each user will be recognized.
-
-Rather than waiting for the prompts, you can specify some options when calling
-create-token, for example
-
-    $ ./bin/accumulo create-token -u root -p secret -f root.pw
-
-would create a token file containing a PasswordToken for
-user 'root' with password 'secret', saved to 'root.pw'.
-
-This local file needs to be uploaded to hdfs to be used with the
-map-reduce job. For example, if the file were 'root.pw' in the local directory:
-
-    $ hadoop fs -put root.pw root.pw
-
-This would put 'root.pw' in the user's home directory in hdfs.
-
-Because the basic WordCount example uses Opts (which extends
-ClientOnRequiredTable) to parse its arguments, you can use a token file with
-it by running the same command as above but replacing the password option
-with the token file option (use -tf rather than -p).
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -tf tokenfile
-
-In the above examples, username was 'root' and tokenfile was 'root.pw'.
-
-However, if you don't want to use the Opts class to parse arguments,
-the TokenFileWordCount is an example of using the token file manually.
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
-
-The results should be the same as the WordCount example except that the
-authentication token was not stored in the configuration. It was instead
-stored in a file that the map-reduce job pulled into the distributed cache.
-(If you ran either of these on the same table right after the
-WordCount example, then the resulting counts should just double.)
-
-
-
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/maxmutation.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/maxmutation.md b/docs/src/main/resources/examples/maxmutation.md
deleted file mode 100644
index 48c918a..0000000
--- a/docs/src/main/resources/examples/maxmutation.md
+++ /dev/null
@@ -1,51 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo MaxMutation Constraints Example
----
-
-This is an example of how to limit the size of mutations that will be accepted into
-a table. Under the default configuration, accumulo does not provide a limitation
-on the size of mutations that can be ingested. Poorly behaved writers might
-inadvertently create mutations so large that they cause the tablet servers to
-run out of memory. A simple constraint can be added to a table to reject very
-large mutations.
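-
-For reference, a constraint is a class implementing
-org.apache.accumulo.core.constraints.Constraint. Below is a highly simplified
-sketch of a size-limiting constraint; unlike the shipped MaxMutationSize,
-which bases its limit on tablet server memory, this sketch uses an arbitrary
-fixed byte limit:
-
-    import java.util.Collections;
-    import java.util.List;
-
-    import org.apache.accumulo.core.constraints.Constraint;
-    import org.apache.accumulo.core.data.Mutation;
-
-    public class SimpleMaxMutationSize implements Constraint {
-      // hypothetical fixed limit; the shipped MaxMutationSize derives its
-      // limit from the tablet server's memory instead
-      private static final long MAX_BYTES = 1 << 20;
-      private static final short TOO_BIG = 0;
-
-      @Override
-      public String getViolationDescription(short violationCode) {
-        return "mutation exceeded maximum size of " + MAX_BYTES;
-      }
-
-      @Override
-      public List<Short> check(Environment env, Mutation mutation) {
-        if (mutation.estimatedMemoryUsed() > MAX_BYTES)
-          return Collections.singletonList(TOO_BIG);
-        return null; // no violations
-      }
-    }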
-
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable test_ingest
-    username@instance test_ingest> config -t test_ingest -s table.constraint.1=org.apache.accumulo.examples.simple.constraints.MaxMutationSize
-    username@instance test_ingest>
-
-
-Now the table will reject any mutation that is larger than 1/256th of the 
-working memory of the tablet server.  The following command attempts to ingest 
-a single row with 10000 columns, which exceeds the memory limit. Depending on the
-amount of Java heap your tserver(s) are given, you may have to increase the number
-of columns provided to see the failure.
-
-    $ ./bin/accumulo org.apache.accumulo.test.TestIngest -i instance -z zookeepers -u username -p password --rows 1 --cols 10000 
-    ERROR : Constraint violates : ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.MaxMutationSize, violationCode:0, violationDescription:mutation exceeded maximum size of 188160, numberOfViolatingMutations:1)
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/regex.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/regex.md b/docs/src/main/resources/examples/regex.md
deleted file mode 100644
index 29d47e1..0000000
--- a/docs/src/main/resources/examples/regex.md
+++ /dev/null
@@ -1,59 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Regex Example
----
-
-This example uses mapreduce and accumulo to find items using regular expressions.
-This is accomplished using a map-only mapreduce job and a scan-time iterator.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dogrow dogcf dogcq dogvalue
-    username@instance> insert catrow catcf catcq catvalue
-    username@instance> quit
-
-The RegexExample class sets an iterator on the scanner. This does pattern matching
-against each key/value in accumulo, and only returns matching items. It will do this
-in parallel and will store the results in files in hdfs.
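-
-The same filtering can also be done from an ordinary scanner, outside of
-MapReduce, by attaching a RegExFilter iterator. A small sketch, assuming an
-existing Connector named conn and the input table created above (the iterator
-priority and name are arbitrary choices):
-
-    import java.util.Map.Entry;
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IteratorSetting;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.client.TableNotFoundException;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.user.RegExFilter;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    static void scanMatching(Connector conn) throws TableNotFoundException {
-      Scanner scanner = conn.createScanner("input", Authorizations.EMPTY);
-      IteratorSetting regex = new IteratorSetting(55, "regex", RegExFilter.class);
-      // match rows starting with "dog"; null means do not filter on that field
-      RegExFilter.setRegexs(regex, "dog.*", null, null, null, false);
-      scanner.addScanIterator(regex);
-      for (Entry<Key,Value> entry : scanner)
-        System.out.println(entry.getKey() + " " + entry.getValue());
-    }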
-
-The following will search for any rows in the input table that start with "dog":
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    Found 3 items
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:11 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:10 /tmp/output/_logs
-    -rw-r--r--   1 username supergroup         51 2013-01-10 14:10 /tmp/output/part-m-00000
-
-We can see the output of our little map-reduce job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    dogrow dogcf:dogcq [] 1357844987994 false	dogvalue
-
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/reservations.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/reservations.md b/docs/src/main/resources/examples/reservations.md
deleted file mode 100644
index 6b4886c..0000000
--- a/docs/src/main/resources/examples/reservations.md
+++ /dev/null
@@ -1,68 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Reservations Example
----
-
-This example shows running a simple reservation system implemented using
-conditional mutations. This system guarantees that only one concurrent user can
-reserve a resource. The example's reserve command allows multiple users to be
-specified. When this is done, it creates a separate reservation thread for each
-user. In the example below threads are spun up for alice, bob, eve, mallory,
-and trent to reserve room06 on 20140101. Bob ends up getting the reservation
-and everyone else is put on a wait list. The example code will take any string
-for what, when and who.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.reservations.ARS
-    >connect test16 localhost root secret ars
-      connected
-    >
-      Commands :
-        reserve <what> <when> <who> {who}
-        cancel <what> <when> <who>
-        list <what> <when>
-    >reserve room06 20140101 alice bob eve mallory trent
-                       bob : RESERVED
-                   mallory : WAIT_LISTED
-                     alice : WAIT_LISTED
-                     trent : WAIT_LISTED
-                       eve : WAIT_LISTED
-    >list room06 20140101
-      Reservation holder : bob
-      Wait list : [mallory, alice, trent, eve]
-    >cancel room06 20140101 alice
-    >cancel room06 20140101 bob
-    >list room06 20140101
-      Reservation holder : mallory
-      Wait list : [trent, eve]
-    >quit
-
-Scanning the table in the Accumulo shell after running the example shows the
-following:
-
-    root@test16> table ars
-    root@test16 ars> scan
-    room06:20140101 res:0001 []    mallory
-    room06:20140101 res:0003 []    trent
-    room06:20140101 res:0004 []    eve
-    room06:20140101 tx:seq []    6
-
-The tx:seq column is incremented for each update to the row allowing for
-detection of concurrent changes. For an update to go through, the sequence
-number must not have changed since the data was read. If it does change,
-the conditional mutation will fail and the example code will retry.
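-
-A rough sketch of that style of conditional update, using the public
-ConditionalWriter API (greatly simplified; the real ARS code also manages the
-reservation and wait-list entries and retries when a write is rejected; the
-ars table name matches the session above):
-
-    import org.apache.accumulo.core.client.AccumuloException;
-    import org.apache.accumulo.core.client.AccumuloSecurityException;
-    import org.apache.accumulo.core.client.ConditionalWriter;
-    import org.apache.accumulo.core.client.ConditionalWriterConfig;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.TableNotFoundException;
-    import org.apache.accumulo.core.data.Condition;
-    import org.apache.accumulo.core.data.ConditionalMutation;
-
-    static boolean bumpSequence(Connector conn, String row, long expectedSeq)
-        throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-      ConditionalWriter writer = conn.createConditionalWriter("ars", new ConditionalWriterConfig());
-      try {
-        // apply the update only if tx:seq still holds the value read earlier
-        ConditionalMutation cm = new ConditionalMutation(row);
-        cm.addCondition(new Condition("tx", "seq").setValue(Long.toString(expectedSeq)));
-        cm.put("tx", "seq", Long.toString(expectedSeq + 1));
-        return writer.write(cm).getStatus() == ConditionalWriter.Status.ACCEPTED;
-      } finally {
-        writer.close();
-      }
-    }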
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/rgbalancer.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/rgbalancer.md b/docs/src/main/resources/examples/rgbalancer.md
deleted file mode 100644
index 3c80861..0000000
--- a/docs/src/main/resources/examples/rgbalancer.md
+++ /dev/null
@@ -1,161 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Balancer Example
----
-
-For some data access patterns, it's important to spread groups of tablets within
-a table out evenly.  Accumulo has a balancer that can do this using a regular
-expression to group tablets. This example shows how this balancer spreads 4
-groups of tablets within a table evenly across 17 tablet servers.
-
-Below shows creating a table and adding splits.  For this example we would like
-all of the tablets where the split point has the same two digits to be on
-different tservers.  This gives us four groups of tablets: 01, 02, 03, and 04.   
-
-    root@accumulo> createtable testRGB
-    root@accumulo testRGB> addsplits -t testRGB 01b 01m 01r 01z  02b 02m 02r 02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 04l 04m 04n 04o 04p
-    root@accumulo testRGB> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    testRGB              =>         2
-    trace                =>         1
-
-After adding the splits we look at the locations in the metadata table.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03m loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04l loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Below, the information above has been rearranged to show which tablet groups are
-on each tserver.  The four tablets in group 03 are on only two tservers; ideally those
-tablets would be spread across 4 tservers.  Note the default tablet (2<) was
-categorized as group 04 below.
-
-    ip-10-1-2-13:9997 01
-    ip-10-1-2-14:9997 04
-    ip-10-1-2-15:9997 01
-    ip-10-1-2-16:9997 04 04
-    ip-10-1-2-17:9997 04 04
-    ip-10-1-2-18:9997 04
-    ip-10-1-2-19:9997 04 04
-    ip-10-1-2-20:9997 03 03
-    ip-10-1-2-21:9997 03 03
-    ip-10-1-2-22:9997 04 04
-    ip-10-1-2-23:9997 04 04
-    ip-10-1-2-24:9997 04 04
-    ip-10-1-2-25:9997 01 01
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 02
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 04
-
-To remedy this situation, the RegexGroupBalancer is configured with the
-commands below.  The configured regular expression selects the first two digits
-from a tablet's end row as the group id.  Tablets that don't match and the
-default tablet are configured to be in group 04.
-
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.pattern=(\\d\\d).*
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.default=04
-    root@accumulo testRGB> config -t testRGB -s table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
-
-After waiting a little bit, look at the tablet locations again and all is good.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;03m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;04l loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Once again, the data above is transformed to make it easier to see which groups
-are on tservers.  The transformed data below shows that all groups are now
-evenly spread.
-
-    ip-10-1-2-13:9997 01 04
-    ip-10-1-2-14:9997    04
-    ip-10-1-2-15:9997 01 04
-    ip-10-1-2-16:9997    04
-    ip-10-1-2-17:9997    04
-    ip-10-1-2-18:9997 01 04
-    ip-10-1-2-19:9997 02 04
-    ip-10-1-2-20:9997 03 04
-    ip-10-1-2-21:9997 03 04
-    ip-10-1-2-22:9997    04
-    ip-10-1-2-23:9997 03 04
-    ip-10-1-2-24:9997    04
-    ip-10-1-2-25:9997 01 04
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 04
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 03 04
-
-If you need this functionality but a regular expression does not meet your
-needs, then extend GroupBalancer.  This allows you to specify a partitioning
-function in Java.  Use the RegexGroupBalancer source as an example; a rough
-sketch of the grouping idea is shown below.
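-
-The sketch below only illustrates how a regular expression maps a tablet's end
-row to a group id, mirroring the table.custom.balancer.group.regex.* settings
-above. It is an illustration of the partitioning function, not the
-GroupBalancer API:
-
-    import java.util.regex.Matcher;
-    import java.util.regex.Pattern;
-
-    static String groupFor(String endRow, Pattern pattern, String defaultGroup) {
-      if (endRow == null)
-        return defaultGroup; // the default tablet has no end row, hence group 04 above
-      Matcher m = pattern.matcher(endRow);
-      return m.matches() ? m.group(1) : defaultGroup;
-    }
-
-    // groupFor("01m", Pattern.compile("(\\d\\d).*"), "04") returns "01"
-    // groupFor(null,  Pattern.compile("(\\d\\d).*"), "04") returns "04"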

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/rowhash.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/rowhash.md b/docs/src/main/resources/examples/rowhash.md
deleted file mode 100644
index 9cd71a7..0000000
--- a/docs/src/main/resources/examples/rowhash.md
+++ /dev/null
@@ -1,61 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo RowHash Example
----
-
-This example shows a simple map/reduce job that reads from an accumulo table and
-writes back into that table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert a-row cf cq value
-    username@instance> insert b-row cf cq value
-    username@instance> quit
-
-The RowHash class will insert a hash for each row in the database if it contains a
-specified column. Here's how to run the map/reduce job:
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
-
-Now we can scan the table and see the hashes:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> scan -t input
-    a-row cf:cq []    value
-    a-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    b-row cf:cq []    value
-    b-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    username@instance>
-
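-Both rows above carry the same value and end up with the same hash, which
-indicates the stored hash is an MD5 digest of the cell value, Base64 encoded
-(hence the cq-MD5BASE64 qualifier). A standalone sketch of that computation,
-not the shipped RowHash code:
-
-    import java.security.MessageDigest;
-    import java.security.NoSuchAlgorithmException;
-    import java.util.Base64;
-
-    // if the digest is taken over the cell value bytes, this reproduces the
-    // cq-MD5BASE64 entries shown in the scan above
-    static String md5Base64(byte[] value) throws NoSuchAlgorithmException {
-      byte[] digest = MessageDigest.getInstance("MD5").digest(value);
-      return Base64.getEncoder().encodeToString(digest);
-    }
-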

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/sample.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/sample.md b/docs/src/main/resources/examples/sample.md
deleted file mode 100644
index 432067e..0000000
--- a/docs/src/main/resources/examples/sample.md
+++ /dev/null
@@ -1,193 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Sampling Example
----
-
-Basic Sampling Example
-----------------------
-
-Accumulo supports building a set of sample data that can be efficiently
-accessed by scanners.  What data is included in the sample set is configurable.
-Below, some data representing documents are inserted.  
-
-    root@instance sampex> createtable sampex
-    root@instance sampex> insert 9255 doc content 'abcde'
-    root@instance sampex> insert 9255 doc url file://foo.txt
-    root@instance sampex> insert 8934 doc content 'accumulo scales'
-    root@instance sampex> insert 8934 doc url file://accumulo_notes.txt
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano'
-    root@instance sampex> insert 2317 doc url file://groceries/9.txt
-    root@instance sampex> insert 3900 doc content 'EC2 ate my homework'
-    root@instance sampex> insert 3900 doc uril file://final_project.txt
-
-Below, the table sampex is configured to build a sample set.  The configuration
-causes Accumulo to include any row where `murmur3_32(row) % 3 == 0` in the
-table's sample data.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.hasher=murmur3_32
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=3
-    root@instance sampex> config -t sampex -s table.sampler=org.apache.accumulo.core.client.sample.RowSampler
-
-Below, attempting to scan the sample returns an error.  This is because data
-was inserted before the sample set was configured.
-
-    root@instance sampex> scan --sample
-    2015-09-09 12:21:50,643 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-
-To remedy this problem, the following command will flush in-memory data and
-compact any files that do not contain the correct sample data.   
-
-    root@instance sampex> compact -t sampex --sf-no-sample
-
-After the compaction, the sample scan works.  
-
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-
-The commands below show that updates to data in the sample are seen when
-scanning the sample.
-
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano, butter'
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano, butter
-    2317 doc:url []    file://groceries/9.txt
-
-In order to make scanning the sample fast, sample data is partitioned as data is
-written to Accumulo.  This means that if the sample configuration is changed, data
-written previously is partitioned using different criteria.  Accumulo
-will detect this situation and fail sample scans.  The commands below show this
-failure and how to fix the problem with a compaction.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=2
-    root@instance sampex> scan --sample
-    2015-09-09 12:22:51,058 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-    root@instance sampex> compact -t sampex --sf-no-sample
-    2015-09-09 12:23:07,242 [shell.Shell] INFO : Compaction of table sampex started for given range
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-    3900 doc:content []    EC2 ate my homework
-    3900 doc:uril []    file://final_project.txt
-    9255 doc:content []    abcde
-    9255 doc:url []    file://foo.txt
-
-The example above is replicated in a java program using the Accumulo API.
-Below is the program name and the command to run it.
-
-    ./bin/accumulo org.apache.accumulo.examples.simple.sample.SampleExample -i instance -z localhost -u root -p secret
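-
-A few of the client API calls such a program relies on are sketched below,
-assuming an existing Connector named conn; see the SampleExample source for
-the exact usage:
-
-    import java.util.Map.Entry;
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.client.sample.RowSampler;
-    import org.apache.accumulo.core.client.sample.SamplerConfiguration;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    static void sampleScan(Connector conn) throws Exception {
-      // equivalent of the three shell config commands used earlier for sampex
-      SamplerConfiguration samplerConfig = new SamplerConfiguration(RowSampler.class.getName());
-      samplerConfig.addOption("hasher", "murmur3_32");
-      samplerConfig.addOption("modulus", "3");
-      conn.tableOperations().setSamplerConfiguration("sampex", samplerConfig);
-
-      // scan only the sample set; this throws SampleNotPresentException until
-      // the table's files contain sample data built with the same configuration
-      Scanner scanner = conn.createScanner("sampex", Authorizations.EMPTY);
-      scanner.setSamplerConfiguration(samplerConfig);
-      for (Entry<Key,Value> entry : scanner)
-        System.out.println(entry.getKey() + " " + entry.getValue());
-    }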
-
-The commands below look under the hood to give some insight into how this
-feature works.  The commands determine what files the sampex table is using.
-
-    root@instance sampex> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    sampex               =>         2
-    trace                =>         1
-    root@instance sampex> scan -t accumulo.metadata -c file -b 2 -e 2<
-    2< file:hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf []    702,8
-
-Below shows running `accumulo rfile-info` on the file above.  This shows the
-rfile has a normal default locality group and a sample default locality group.
-The output also shows the configuration used to create the sample locality
-group.  The sample configuration within an rfile must match the table's sample
-configuration for sample scans to work.
-
-    $ ./bin/accumulo rfile-info hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    Reading file: hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    RFile Version            : 8
-    
-    Locality group           : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 35 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 8
-    	Column families        : [doc]
-    
-    Sample Configuration     :
-    	Sampler class          : org.apache.accumulo.core.client.sample.RowSampler
-    	Sampler options        : {hasher=murmur3_32, modulus=2}
-
-    Sample Locality group    : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 36 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 6
-    	Column families        : [doc]
-    
-    Meta block     : BCFile.index
-          Raw size             : 4 bytes
-          Compressed size      : 12 bytes
-          Compression type     : gz
-
-    Meta block     : RFile.index
-          Raw size             : 309 bytes
-          Compressed size      : 176 bytes
-          Compression type     : gz
-
-
-Shard Sampling Example
-----------------------
-
-The [shard example][shard] shows how to index and search files using Accumulo.  That
-example indexes documents into a table named `shard`.  The indexing scheme used
-in that example places the document name in the column qualifier.  A useful
-sample of this indexing scheme should contain all data for any document in the
-sample.   To accomplish this, the following commands build a sample for the
-shard table based on the column qualifier.
-
-    root@instance shard> config -t shard -s table.sampler.opt.hasher=murmur3_32
-    root@instance shard> config -t shard -s table.sampler.opt.modulus=101
-    root@instance shard> config -t shard -s table.sampler.opt.qualifier=true
-    root@instance shard> config -t shard -s table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
-    root@instance shard> compact -t shard --sf-no-sample -w
-    2015-07-23 15:00:09,280 [shell.Shell] INFO : Compacting table ...
-    2015-07-23 15:00:10,134 [shell.Shell] INFO : Compaction of table shard completed for given range
-
-After enabling sampling, the command below counts the number of documents in
-the sample containing the words `import` and `int`.     
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sample -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-         11      11    1246
-
-The command below counts the total number of documents containing the words
-`import` and `int`.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-       1085    1085  118175
-
-The counts 11 out of 1085 total are around what would be expected for a modulus
-of 101.  Querying the sample first provides a quick way to estimate how much data
-the real query will bring back. 
-
-Another way sample data could be used with the shard example is with a
-specialized iterator.  In the examples' source code there is an iterator named
-CutoffIntersectingIterator.  This iterator first checks how many documents are
-found in the sample data.  If too many documents are found in the sample data,
-then it returns nothing.   Otherwise it proceeds to query the full data set.
-To experiment with this iterator, use the following command.  The
-`--sampleCutoff` option below will cause the query to return nothing if based
-on the sample it appears a query would return more than 1000 documents.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sampleCutoff 1000 -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/shard.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/shard.md b/docs/src/main/resources/examples/shard.md
deleted file mode 100644
index 5e5789b..0000000
--- a/docs/src/main/resources/examples/shard.md
+++ /dev/null
@@ -1,68 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Shard Example
----
-
-Accumulo has an iterator called the intersecting iterator which supports querying a term index that is partitioned by
-document, or "sharded". This example shows how to use the intersecting iterator through these four programs:
-
- * Index.java - Indexes a set of text files into an Accumulo table
- * Query.java - Finds documents containing a given set of terms.
- * Reverse.java - Reads the index table and writes a map of documents to terms into another table.
- * ContinuousQuery.java - Uses the table populated by Reverse.java to select N random terms per document. Then it continuously and randomly queries those terms.
-
-To run these example programs, create two tables like below.
-
-    username@instance> createtable shard
-    username@instance shard> createtable doc2term
-
-After creating the tables, index some files. The following command indexes all of the java files in the Accumulo source code.
-
-    $ cd /local/username/workspace/accumulo/
-    $ find core/src server/src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i instance -z zookeepers -t shard -u username -p password --partitions 30
-
-The following command queries the index to find all files containing 'foo' and 'bar'.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance -z zookeepers -t shard -u username -p password foo bar
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/RowDeleteTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/logger/TestLogWriter.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/DeleteEverythingTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/data/KeyExtentTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/constraints/MetadataConstraintsTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/iterators/WholeRowIteratorTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/util/DefaultMapTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/tabletserver/InMemoryMapTest.java
-
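-Under the hood, Query attaches an IntersectingIterator to a BatchScanner,
-roughly as sketched below (simplified; the shipped Query.java also handles
-argument parsing and output formatting, and the thread count here is
-arbitrary):
-
-    import java.util.Collections;
-    import java.util.Map.Entry;
-
-    import org.apache.accumulo.core.client.BatchScanner;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IteratorSetting;
-    import org.apache.accumulo.core.client.TableNotFoundException;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.user.IntersectingIterator;
-    import org.apache.accumulo.core.security.Authorizations;
-    import org.apache.hadoop.io.Text;
-
-    static void query(Connector conn, String... terms) throws TableNotFoundException {
-      // terms are column families in the shard table; matching document ids
-      // come back in the column qualifier
-      Text[] termTexts = new Text[terms.length];
-      for (int i = 0; i < terms.length; i++)
-        termTexts[i] = new Text(terms[i]);
-
-      BatchScanner bs = conn.createBatchScanner("shard", Authorizations.EMPTY, 8);
-      try {
-        IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
-        IntersectingIterator.setColumnFamilies(ii, termTexts);
-        bs.addScanIterator(ii);
-        bs.setRanges(Collections.singleton(new Range()));
-        for (Entry<Key,Value> entry : bs)
-          System.out.println(entry.getKey().getColumnQualifier());
-      } finally {
-        bs.close();
-      }
-    }
-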
-In order to run ContinuousQuery, we need to run Reverse.java to populate doc2term.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password
-
-Below ContinuousQuery is run using 5 terms. So it selects 5 random terms from each document, then it continually
-randomly selects one set of 5 terms and queries. It prints the number of matching documents and the time in seconds.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password --terms 5
-    [public, core, class, binarycomparable, b] 2  0.081
-    [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
-    [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, columnvisibility] 1  0.049
-    [getpackage, testversion, util, version, 55] 1  0.048
-    [for, static, println, public, the] 55  0.211
-    [sleeptime, wrappingiterator, options, long, utilwaitthread] 1  0.057
-    [string, public, long, 0, wait] 12  0.132

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/tabletofile.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/tabletofile.md b/docs/src/main/resources/examples/tabletofile.md
deleted file mode 100644
index 5316b51..0000000
--- a/docs/src/main/resources/examples/tabletofile.md
+++ /dev/null
@@ -1,61 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Table-to-File Example
----
-
-This example uses mapreduce to extract specified columns from an existing table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dog cf cq dogvalue
-    username@instance> insert cat cf cq catvalue
-    username@instance> insert junk family qualifier junkvalue
-    username@instance> quit
-
-The TableToFile class configures a map-only job to read the specified columns and
-write the key/value pairs to a file in HDFS.
-
-The following will extract the rows containing the column "cf:cq":
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs/history
-    -rw-r--r--   1 username supergroup       9049 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_1357847072863_username_TableToFile%5F1357847071434
-    -rw-r--r--   1 username supergroup      26172 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_conf.xml
-    -rw-r--r--   1 username supergroup         50 2013-01-10 14:44 /tmp/output/part-m-00000
-
-We can see the output of our little map-reduce job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    cat cf:cq []	catvalue
-    dog cf:cq []	dogvalue
-    $
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/terasort.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/terasort.md b/docs/src/main/resources/examples/terasort.md
deleted file mode 100644
index 195bb4a..0000000
--- a/docs/src/main/resources/examples/terasort.md
+++ /dev/null
@@ -1,52 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Terasort Example
----
-
-This example uses map/reduce to generate random input data that will
-be sorted by storing it into accumulo. It uses data very similar to the
-hadoop terasort benchmark.
-
-To run this example, supply arguments describing the amount of data:
-
-    $ ./contrib/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
-    -i instance -z zookeepers -u user -p password \
-    --count 10 \
-    --minKeySize 10 \
-    --maxKeySize 10 \
-    --minValueSize 78 \
-    --maxValueSize 78 \
-    --table sort \
-    --splits 10 \
-
-After the map reduce job completes, scan the data:
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> scan -t sort
-    +l-$$OE/ZH c:         4 []    GGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOO
-    ,C)wDw//u= c:        10 []    CCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKK
-    75@~?'WdUF c:         1 []    IIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQ
-    ;L+!2rT~hd c:         8 []    MMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUU
-    LsS8)|.ZLD c:         5 []    OOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWW
-    M^*dDE;6^< c:         9 []    UUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCC
-    ^Eu)<n#kdP c:         3 []    YYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGG
-    le5awB.$sm c:         6 []    WWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEE
-    q__[fwhKFg c:         7 []    EEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMM
-    w[o||:N&H, c:         2 []    QQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYY
-
-Of course, a real benchmark would ingest millions of entries.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/docs/src/main/resources/examples/visibility.md
----------------------------------------------------------------------
diff --git a/docs/src/main/resources/examples/visibility.md b/docs/src/main/resources/examples/visibility.md
deleted file mode 100644
index 8345a9b..0000000
--- a/docs/src/main/resources/examples/visibility.md
+++ /dev/null
@@ -1,133 +0,0 @@
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
----
-title: Apache Accumulo Visibility, Authorizations, and Permissions Example
----
-
-## Creating a new user
-
-    root@instance> createuser username
-    Enter new password for 'username': ********
-    Please confirm new password for 'username': ********
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    06 10:48:47,931 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance> userpermissions
-    System permissions:
-
-    Table permissions (accumulo.metadata): Table.READ
-    username@instance>
-
-A user does not by default have permission to create a table.
-
-## Granting permissions to a user
-
-    username@instance> user root
-    Enter password for user root: ********
-    root@instance> grant -s System.CREATE_TABLE -u username
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    username@instance> userpermissions
-    System permissions: System.CREATE_TABLE
-
-    Table permissions (accumulo.metadata): Table.READ
-    Table permissions (vistest): Table.READ, Table.WRITE, Table.BULK_IMPORT, Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE
-    username@instance vistest>
-
-## Inserting data with visibilities
-
-Visibilities are boolean AND (&) and OR (|) combinations of authorization
-tokens. Authorization tokens are arbitrary strings taken from a restricted
-ASCII character set. Parentheses are required to specify order of operations
-in visibilities.
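-
-The same visibilities can be applied through the Java API by attaching a
-ColumnVisibility to each cell of a Mutation and scanning with Authorizations.
-A small sketch, assuming an existing Connector named conn and the vistest
-table used below:
-
-    import java.util.Map.Entry;
-
-    import org.apache.accumulo.core.client.BatchWriter;
-    import org.apache.accumulo.core.client.BatchWriterConfig;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-    import org.apache.accumulo.core.security.ColumnVisibility;
-
-    static void writeAndRead(Connector conn) throws Exception {
-      BatchWriter writer = conn.createBatchWriter("vistest", new BatchWriterConfig());
-      Mutation m = new Mutation("row");
-      m.put("f1", "q1", new ColumnVisibility("A"), "v1");
-      m.put("f2", "q2", new ColumnVisibility("A&B"), "v2");
-      writer.addMutation(m);
-      writer.close();
-
-      // only cells whose visibility expression is satisfied by the scan
-      // authorizations are returned; here that is f1:q1 but not f2:q2
-      Scanner scanner = conn.createScanner("vistest", new Authorizations("A"));
-      for (Entry<Key,Value> entry : scanner)
-        System.out.println(entry.getKey() + " " + entry.getValue());
-    }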
-
-    username@instance vistest> insert row f1 q1 v1 -l A
-    username@instance vistest> insert row f2 q2 v2 -l A&B
-    username@instance vistest> insert row f3 q3 v3 -l apple&carrot|broccoli|spinach
-    06 11:19:01,432 [shell.Shell] ERROR: org.apache.accumulo.core.util.BadArgumentException: cannot mix | and & near index 12
-    apple&carrot|broccoli|spinach
-                ^
-    username@instance vistest> insert row f3 q3 v3 -l (apple&carrot)|broccoli|spinach
-    username@instance vistest>
-
-## Scanning with authorizations
-
-Authorizations are sets of authorization tokens. Each Accumulo user has a set
-of authorizations, and each scan is performed with a set of authorizations.
-The authorizations for a scan must be a subset of the user's authorizations.
-By default, a user's set of authorizations is empty.
-
-    username@instance vistest> scan
-    username@instance vistest> scan -s A
-    06 11:43:14,951 [shell.Shell] ERROR: java.lang.RuntimeException: org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_AUTHORIZATIONS - The user does not have the specified authorizations assigned
-    username@instance vistest>
-
-## Setting authorizations for a user
-
-    username@instance vistest> setauths -s A
-    06 11:53:42,056 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance vistest>
-
-A user cannot set authorizations unless the user has the System.ALTER_USER permission.
-The root user has this permission.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan -s A
-    row f1:q1 [A]    v1
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    username@instance vistest>
-
-The default authorizations for a scan are the user's entire set of authorizations.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A,B,broccoli -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    username@instance vistest> scan -s B
-    username@instance vistest>
-
-If you want, you can limit a user to inserting only data that they themselves
-can read. This is enforced by adding the following constraint to the table.
-
-    username@instance vistest> user root
-    Enter password for user root: ******
-    root@instance vistest> config -t vistest -s table.constraint.1=org.apache.accumulo.core.security.VisibilityConstraint
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> insert row f4 q4 v4 -l spinach
-        Constraint Failures:
-            ConstraintViolationSummary(constrainClass:org.apache.accumulo.core.security.VisibilityConstraint, violationCode:2, violationDescription:User does not have authorization on column visibility, numberOfViolatingMutations:1)
-    username@instance vistest> insert row f4 q4 v4 -l spinach|broccoli
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    row f4:q4 [spinach|broccoli]    v4
-    username@instance vistest>
-
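The shell walkthrough in the removed file above can also be reproduced through the Java client API. The following is only a hedged sketch: it assumes a Connector for the root user (rootConn) and a Connector for the new user (userConn), both obtained elsewhere, and it reuses the username and vistest names from the session; none of this code appears in the diff.

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.security.ColumnVisibility;
    import org.apache.accumulo.core.security.SystemPermission;

    public class VisibilitySketch {
      public static void run(Connector rootConn, Connector userConn) throws Exception {
        // grant -s System.CREATE_TABLE -u username
        rootConn.securityOperations().grantSystemPermission("username",
            SystemPermission.CREATE_TABLE);

        // setauths -s A,B,broccoli -u username
        rootConn.securityOperations().changeUserAuthorizations("username",
            new Authorizations("A", "B", "broccoli"));

        // config -t vistest -s table.constraint.1=...VisibilityConstraint
        rootConn.tableOperations().addConstraint("vistest",
            "org.apache.accumulo.core.security.VisibilityConstraint");

        // insert row f3 q3 v3 -l (apple&carrot)|broccoli|spinach
        BatchWriter bw = userConn.createBatchWriter("vistest", new BatchWriterConfig());
        try {
          Mutation m = new Mutation("row");
          m.put("f3", "q3", new ColumnVisibility("(apple&carrot)|broccoli|spinach"),
              new Value("v3".getBytes()));
          bw.addMutation(m);
        } finally {
          bw.close();
        }

        // scan -s broccoli : scan authorizations must be a subset of the user's
        Scanner scanner = userConn.createScanner("vistest", new Authorizations("broccoli"));
        for (Entry<Key,Value> entry : scanner) {
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
      }
    }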

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/.gitignore
----------------------------------------------------------------------
diff --git a/examples/simple/.gitignore b/examples/simple/.gitignore
deleted file mode 100644
index e77a822..0000000
--- a/examples/simple/.gitignore
+++ /dev/null
@@ -1,28 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Maven ignores
-/target/
-
-# IDE ignores
-/.settings/
-/.project
-/.classpath
-/.pydevproject
-/.idea
-/*.iml
-/nbproject/
-/nbactions.xml
-/nb-configuration.xml

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/pom.xml
----------------------------------------------------------------------
diff --git a/examples/simple/pom.xml b/examples/simple/pom.xml
deleted file mode 100644
index a9e6b7b..0000000
--- a/examples/simple/pom.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.accumulo</groupId>
-    <artifactId>accumulo-project</artifactId>
-    <version>2.0.0-SNAPSHOT</version>
-    <relativePath>../../pom.xml</relativePath>
-  </parent>
-  <artifactId>accumulo-examples-simple</artifactId>
-  <name>Apache Accumulo Simple Examples</name>
-  <description>Simple Apache Accumulo examples.</description>
-  <dependencies>
-    <dependency>
-      <groupId>com.beust</groupId>
-      <artifactId>jcommander</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.auto.service</groupId>
-      <artifactId>auto-service</artifactId>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-configuration</groupId>
-      <artifactId>commons-configuration</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>jline</groupId>
-      <artifactId>jline</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-fate</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-shell</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-tracer</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-site-plugin</artifactId>
-          <configuration>
-            <!-- this is just an example. Leave it out. -->
-            <skip>true</skip>
-          </configuration>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-  </build>
-</project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/findbugs/exclude-filter.xml
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/findbugs/exclude-filter.xml b/examples/simple/src/main/findbugs/exclude-filter.xml
deleted file mode 100644
index 0ce1597..0000000
--- a/examples/simple/src/main/findbugs/exclude-filter.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <!-- example classes can call System.exit -->
-    <Class name="org.apache.accumulo.examples.simple.isolation.InterferenceTest$Writer" />
-    <Bug code="DM" pattern="DM_EXIT" />
-  </Match>
-</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
deleted file mode 100644
index 4ac9db1..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/CountingVerifyingReceiver.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Arrays;
-import java.util.HashMap;
-
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Internal class used to verify validity of data read.
- */
-class CountingVerifyingReceiver {
-  private static final Logger log = LoggerFactory.getLogger(CountingVerifyingReceiver.class);
-
-  long count = 0;
-  int expectedValueSize = 0;
-  HashMap<Text,Boolean> expectedRows;
-
-  CountingVerifyingReceiver(HashMap<Text,Boolean> expectedRows, int expectedValueSize) {
-    this.expectedRows = expectedRows;
-    this.expectedValueSize = expectedValueSize;
-  }
-
-  public void receive(Key key, Value value) {
-
-    String row = key.getRow().toString();
-    long rowid = Integer.parseInt(row.split("_")[1]);
-
-    byte[] expectedValue = RandomBatchWriter.createValue(rowid, expectedValueSize);
-
-    if (!Arrays.equals(expectedValue, value.get())) {
-      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
-    }
-
-    if (!expectedRows.containsKey(key.getRow())) {
-      log.error("Got unexpected key " + key);
-    } else {
-      expectedRows.put(key.getRow(), true);
-    }
-
-    count++;
-  }
-}
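A hedged usage sketch for the receiver removed above, written as if it lived in the same package. It assumes a Scanner over a table populated by RandomBatchWriter; the example row id, the expected value size of 50, and the wrapper class name are illustrative assumptions, not taken from the diff.

    import java.util.HashMap;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    class CountingVerifyingReceiverUsage {
      // scanner is assumed to cover the table written by RandomBatchWriter
      static void verify(Scanner scanner) {
        HashMap<Text,Boolean> expectedRows = new HashMap<>();
        expectedRows.put(new Text(String.format("row_%010d", 42L)), false);

        CountingVerifyingReceiver receiver = new CountingVerifyingReceiver(expectedRows, 50);
        for (Entry<Key,Value> entry : scanner) {
          receiver.receive(entry.getKey(), entry.getValue());
        }

        // any row still mapped to false was expected but never seen in the scan
        for (Entry<Text,Boolean> expected : expectedRows.entrySet()) {
          if (!expected.getValue()) {
            System.err.println("Did not see expected row " + expected.getKey());
          }
        }
      }
    }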

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java
deleted file mode 100644
index fb460f6..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/Flush.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.Connector;
-
-/**
- * Simple example for using tableOperations() (like create, delete, flush, etc).
- */
-public class Flush {
-
-  public static void main(String[] args) {
-    ClientOnRequiredTable opts = new ClientOnRequiredTable();
-    opts.parseArgs(Flush.class.getName(), args);
-    try {
-      Connector connector = opts.getConnector();
-      connector.tableOperations().flush(opts.getTableName(), null, null, true);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
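Beyond flush, tableOperations() exposes the other lifecycle calls the class comment above alludes to. A minimal sketch, assuming a Connector and table name supplied by the caller (the class name is an illustrative assumption):

    import org.apache.accumulo.core.client.Connector;

    public class TableOpsSketch {
      public static void demo(Connector connector, String table) throws Exception {
        if (!connector.tableOperations().exists(table)) {
          connector.tableOperations().create(table);
        }
        // flush(table, startRow, endRow, wait): null start/end rows cover the
        // whole table, and wait=true blocks until the flush completes
        connector.tableOperations().flush(table, null, null, true);
        connector.tableOperations().delete(table);
      }
    }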