Posted to commits@accumulo.apache.org by mw...@apache.org on 2016/12/09 17:16:51 UTC

[5/7] accumulo git commit: ACCUMULO-4511 Removed Accumulo Examples

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
deleted file mode 100644
index e762e7d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchScanner.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import static org.apache.accumulo.examples.simple.client.RandomBatchWriter.abs;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.cli.BatchScannerOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for reading random batches of data from Accumulo. See docs/examples/README.batch for instructions.
- */
-public class RandomBatchScanner {
-  private static final Logger log = LoggerFactory.getLogger(RandomBatchScanner.class);
-
-  /**
-   * Generate a number of ranges, each covering a single random row.
-   *
-   * @param num
-   *          the number of ranges to generate
-   * @param min
-   *          the minimum row that will be generated
-   * @param max
-   *          the maximum row that will be generated
-   * @param r
-   *          a random number generator
-   * @param ranges
-   *          a set in which to store the generated ranges
-   * @param expectedRows
-   *          a map in which to store the rows covered by the ranges (initially mapped to false)
-   */
-  static void generateRandomQueries(int num, long min, long max, Random r, HashSet<Range> ranges, HashMap<Text,Boolean> expectedRows) {
-    log.info(String.format("Generating %,d random queries...", num));
-    while (ranges.size() < num) {
-      long rowid = (abs(r.nextLong()) % (max - min)) + min;
-
-      Text row1 = new Text(String.format("row_%010d", rowid));
-
-      Range range = new Range(new Text(row1));
-      ranges.add(range);
-      expectedRows.put(row1, false);
-    }
-
-    log.info("finished");
-  }
-
-  /**
-   * Prints a count of the number of rows mapped to false.
-   *
-   * @return true if all expected rows were found
-   */
-  private static boolean checkAllRowsFound(HashMap<Text,Boolean> expectedRows) {
-    int count = 0;
-    boolean allFound = true;
-    for (Entry<Text,Boolean> entry : expectedRows.entrySet())
-      if (!entry.getValue())
-        count++;
-
-    if (count > 0) {
-      log.warn("Did not find " + count + " rows");
-      allFound = false;
-    }
-    return allFound;
-  }
-
-  /**
-   * Generates a number of random queries, verifies that the key/value pairs returned were in the queried ranges and that the values were generated by
-   * {@link RandomBatchWriter#createValue(long, int)}. Prints information about the results.
-   *
-   * @param num
-   *          the number of queries to generate
-   * @param min
-   *          the min row to query
-   * @param max
-   *          the max row to query
-   * @param evs
-   *          the expected size of the values
-   * @param r
-   *          a random number generator
-   * @param tsbr
-   *          a batch scanner
-   * @return true if all the expected rows were returned by the queries
-   */
-  static boolean doRandomQueries(int num, long min, long max, int evs, Random r, BatchScanner tsbr) {
-
-    HashSet<Range> ranges = new HashSet<>(num);
-    HashMap<Text,Boolean> expectedRows = new HashMap<>();
-
-    generateRandomQueries(num, min, max, r, ranges, expectedRows);
-
-    tsbr.setRanges(ranges);
-
-    CountingVerifyingReceiver receiver = new CountingVerifyingReceiver(expectedRows, evs);
-
-    long t1 = System.currentTimeMillis();
-
-    for (Entry<Key,Value> entry : tsbr) {
-      receiver.receive(entry.getKey(), entry.getValue());
-    }
-
-    long t2 = System.currentTimeMillis();
-
-    log.info(String.format("%6.2f lookups/sec %6.2f secs%n", num / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0)));
-    log.info(String.format("num results : %,d%n", receiver.count));
-
-    return checkAllRowsFound(expectedRows);
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--min", description = "miniumum row that will be generated")
-    long min = 0;
-    @Parameter(names = "--max", description = "maximum ow that will be generated")
-    long max = 0;
-    @Parameter(names = "--num", required = true, description = "number of ranges to generate")
-    int num = 0;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int size = 0;
-    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
-    Long seed = null;
-  }
-
-  /**
-   * Scans a specified number of entries in Accumulo using a {@link BatchScanner}. Completes the scan twice to compare the time for a fresh query with that of
-   * a repeated query, which has cached metadata and already-established connections.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    BatchScannerOpts bsOpts = new BatchScannerOpts();
-    opts.parseArgs(RandomBatchScanner.class.getName(), args, bsOpts);
-
-    Connector connector = opts.getConnector();
-    BatchScanner batchReader = connector.createBatchScanner(opts.getTableName(), opts.auths, bsOpts.scanThreads);
-    batchReader.setTimeout(bsOpts.scanTimeout, TimeUnit.MILLISECONDS);
-
-    Random r;
-    if (opts.seed == null)
-      r = new Random();
-    else
-      r = new Random(opts.seed);
-
-    // do one cold
-    boolean status = doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
-
-    System.gc();
-    System.gc();
-    System.gc();
-
-    if (opts.seed == null)
-      r = new Random();
-    else
-      r = new Random(opts.seed);
-
-    // do one hot (connections already established, metadata table cached)
-    status = status && doRandomQueries(opts.num, opts.min, opts.max, opts.size, r, batchReader);
-
-    batchReader.close();
-    if (!status) {
-      System.exit(1);
-    }
-  }
-}
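
For reference, the batch-lookup pattern this file demonstrated remains available through the public client API (ACCUMULO-4511 moves the runnable examples out of the main repository). A minimal sketch of the same pattern; the instance name, ZooKeeper address, credentials, and table name below are hypothetical placeholders:

import java.util.Collections;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class BatchScanSketch {
  public static void main(String[] args) throws Exception {
    // hypothetical connection details
    Connector conn = new ZooKeeperInstance("instance", "localhost:2181")
        .getConnector("root", new PasswordToken("secret"));
    // look up one row per Range, in parallel across 4 query threads
    BatchScanner bs = conn.createBatchScanner("test", Authorizations.EMPTY, 4);
    bs.setRanges(Collections.singleton(new Range(String.format("row_%010d", 1L))));
    for (Entry<Key,Value> entry : bs) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
    bs.close();
  }
}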

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
deleted file mode 100644
index 51aee8f..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.TabletId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for writing random data to Accumulo. See docs/examples/README.batch for instructions.
- *
- * The rows of the entries will be randomly generated numbers between a specified min and max (prefixed by "row_"). The column family will be "foo" and the
- * column qualifier will be "1". The values will be random byte arrays of a specified size.
- */
-public class RandomBatchWriter {
-
-  /**
-   * Creates a random byte array of specified size using the specified seed.
-   *
-   * @param rowid
-   *          the seed to use for the random number generator
-   * @param dataSize
-   *          the size of the array
-   * @return a random byte array
-   */
-  public static byte[] createValue(long rowid, int dataSize) {
-    Random r = new Random(rowid);
-    byte value[] = new byte[dataSize];
-
-    r.nextBytes(value);
-
-    // transform to printable chars
-    for (int j = 0; j < value.length; j++) {
-      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
-    }
-
-    return value;
-  }
-
-  /**
-   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
-   *
-   * @param rowid
-   *          the row of the mutation
-   * @param dataSize
-   *          the size of the random value
-   * @param visibility
-   *          the visibility of the entry to insert
-   * @return a mutation
-   */
-  public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
-    Text row = new Text(String.format("row_%010d", rowid));
-
-    Mutation m = new Mutation(row);
-
-    // create a random value that is a function of the
-    // row id for verification purposes
-    byte value[] = createValue(rowid, dataSize);
-
-    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
-
-    return m;
-  }
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--num", required = true)
-    int num = 0;
-    @Parameter(names = "--min")
-    long min = 0;
-    @Parameter(names = "--max")
-    long max = Long.MAX_VALUE;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int size = 0;
-    @Parameter(names = "--vis", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility("");
-    @Parameter(names = "--seed", description = "seed for pseudo-random number generator")
-    Long seed = null;
-  }
-
-  public static long abs(long l) {
-    l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
-    if (l < 0)
-      return 0;
-    return l;
-  }
-
-  /**
-   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
-    if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long in a way that doesn't trigger FindBugs
-      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. "
-          + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.", opts.num, opts.min, opts.max,
-          (opts.max - opts.min)));
-      System.exit(1);
-    }
-    Random r;
-    if (opts.seed == null)
-      r = new Random();
-    else {
-      r = new Random(opts.seed);
-    }
-    Connector connector = opts.getConnector();
-    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    // reuse the ColumnVisibility object to improve performance
-    ColumnVisibility cv = opts.visibility;
-
-    // Generate num unique row ids in the given range
-    HashSet<Long> rowids = new HashSet<>(opts.num);
-    while (rowids.size() < opts.num) {
-      rowids.add((abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
-    }
-    for (long rowid : rowids) {
-      Mutation m = createMutation(rowid, opts.size, cv);
-      bw.addMutation(m);
-    }
-
-    try {
-      bw.close();
-    } catch (MutationsRejectedException e) {
-      if (e.getSecurityErrorCodes().size() > 0) {
-        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<>();
-        for (Entry<TabletId,Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
-          String tableId = ke.getKey().getTableId().toString();
-          Set<SecurityErrorCode> secCodes = tables.get(tableId);
-          if (secCodes == null) {
-            secCodes = new HashSet<>();
-            tables.put(tableId, secCodes);
-          }
-          secCodes.addAll(ke.getValue());
-        }
-        System.err.println("ERROR : Not authorized to write to tables : " + tables);
-      }
-
-      if (e.getConstraintViolationSummaries().size() > 0) {
-        System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
-      }
-      System.exit(1);
-    }
-  }
-}
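
The write half of the pair: the same mutation layout (family "foo", qualifier "1", a value derived from the row id so readers can verify it) is reproducible against the public API. A sketch assuming a Connector obtained as in the previous snippet; the table name is a placeholder:

import java.util.Random;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;

public class BatchWriteSketch {
  static void writeRow(Connector conn, String table, long rowid, int dataSize) throws Exception {
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation m = new Mutation(String.format("row_%010d", rowid));
    // seed the value with the row id, as createValue(long, int) did
    byte[] value = new byte[dataSize];
    new Random(rowid).nextBytes(value);
    m.put("foo", "1", new ColumnVisibility(), new Value(value));
    bw.addMutation(m);
    bw.close(); // throws MutationsRejectedException on security or constraint errors
  }
}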

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
deleted file mode 100644
index 44d4b6f..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/ReadWriteExample.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.DurabilityImpl;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.ByteArraySet;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.IStringConverter;
-import com.beust.jcommander.Parameter;
-
-public class ReadWriteExample {
-  // defaults
-  private static final String DEFAULT_AUTHS = "LEVEL1,GROUP1";
-  private static final String DEFAULT_TABLE_NAME = "test";
-
-  private Connector conn;
-
-  static class DurabilityConverter implements IStringConverter<Durability> {
-    @Override
-    public Durability convert(String value) {
-      return DurabilityImpl.fromString(value);
-    }
-  }
-
-  static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table before doing anything")
-    boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table when finished")
-    boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries before any deletes")
-    boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any creates/deletes")
-    boolean readEntries = false;
-    @Parameter(names = {"-d", "--delete"}, description = "delete entries after any creates")
-    boolean deleteEntries = false;
-    @Parameter(names = {"--durability"}, description = "durability used for writes (none, log, flush or sync)", converter = DurabilityConverter.class)
-    Durability durability = Durability.DEFAULT;
-
-    public Opts() {
-      super(DEFAULT_TABLE_NAME);
-      auths = new Authorizations(DEFAULT_AUTHS.split(","));
-    }
-  }
-
-  // hidden constructor
-  private ReadWriteExample() {}
-
-  private void execute(Opts opts, ScannerOpts scanOpts) throws Exception {
-    conn = opts.getConnector();
-
-    // add the authorizations to the user
-    Authorizations userAuthorizations = conn.securityOperations().getUserAuthorizations(opts.getPrincipal());
-    ByteArraySet auths = new ByteArraySet(userAuthorizations.getAuthorizations());
-    auths.addAll(opts.auths.getAuthorizations());
-    if (!auths.isEmpty())
-      conn.securityOperations().changeUserAuthorizations(opts.getPrincipal(), new Authorizations(auths));
-
-    // create table
-    if (opts.createtable) {
-      SortedSet<Text> partitionKeys = new TreeSet<>();
-      for (int i = Byte.MIN_VALUE; i < Byte.MAX_VALUE; i++)
-        partitionKeys.add(new Text(new byte[] {(byte) i}));
-      conn.tableOperations().create(opts.getTableName());
-      conn.tableOperations().addSplits(opts.getTableName(), partitionKeys);
-    }
-
-    // send mutations
-    createEntries(opts);
-
-    // read entries
-    if (opts.readEntries) {
-      // Note that the user must have been granted the specified scan authorizations
-      // by an administrator first
-      Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
-      scanner.setBatchSize(scanOpts.scanBatchSize);
-      for (Entry<Key,Value> entry : scanner)
-        System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
-    }
-
-    // delete table
-    if (opts.deletetable)
-      conn.tableOperations().delete(opts.getTableName());
-  }
-
-  private void createEntries(Opts opts) throws Exception {
-    if (opts.createEntries || opts.deleteEntries) {
-      BatchWriterConfig cfg = new BatchWriterConfig();
-      cfg.setDurability(opts.durability);
-      BatchWriter writer = conn.createBatchWriter(opts.getTableName(), cfg);
-      ColumnVisibility cv = new ColumnVisibility(opts.auths.toString().replace(',', '|'));
-
-      Text cf = new Text("datatypes");
-      Text cq = new Text("xml");
-      byte[] row = {'h', 'e', 'l', 'l', 'o', '\0'};
-      byte[] value = {'w', 'o', 'r', 'l', 'd', '\0'};
-
-      for (int i = 0; i < 10; i++) {
-        row[row.length - 1] = (byte) i;
-        Mutation m = new Mutation(new Text(row));
-        if (opts.deleteEntries) {
-          m.putDelete(cf, cq, cv);
-        }
-        if (opts.createEntries) {
-          value[value.length - 1] = (byte) i;
-          m.put(cf, cq, cv, new Value(value));
-        }
-        writer.addMutation(m);
-      }
-      writer.close();
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    ReadWriteExample rwe = new ReadWriteExample();
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    opts.parseArgs(ReadWriteExample.class.getName(), args, scanOpts);
-    rwe.execute(opts, scanOpts);
-  }
-}
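
One detail worth preserving from this file is the table setup: it pre-split the new table with one split point per possible leading byte, so writes spread across tablets immediately. A sketch of that setup in isolation, assuming an existing Connector and a hypothetical table name:

import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.accumulo.core.client.Connector;
import org.apache.hadoop.io.Text;

public class PreSplitSketch {
  // one split point per possible leading byte, as ReadWriteExample did
  static void createPreSplit(Connector conn, String table) throws Exception {
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = Byte.MIN_VALUE; i < Byte.MAX_VALUE; i++)
      splits.add(new Text(new byte[] {(byte) i}));
    conn.tableOperations().create(table);
    conn.tableOperations().addSplits(table, splits);
  }
}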

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
deleted file mode 100644
index 007619d..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RowOperations.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A demonstration of reading entire rows and deleting entire rows.
- */
-public class RowOperations {
-
-  private static final Logger log = LoggerFactory.getLogger(RowOperations.class);
-
-  private static Connector connector;
-  private static String tableName = "example";
-  private static BatchWriter bw;
-
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
-      MutationsRejectedException {
-
-    ClientOpts opts = new ClientOpts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(RowOperations.class.getName(), args, scanOpts, bwOpts);
-
-    // First the setup work
-    connector = opts.getConnector();
-
-    // lets create an example table
-    connector.tableOperations().create(tableName);
-
-    // lets create 3 rows of information
-    Text row1 = new Text("row1");
-    Text row2 = new Text("row2");
-    Text row3 = new Text("row3");
-
-    // Which means 3 different mutations
-    Mutation mut1 = new Mutation(row1);
-    Mutation mut2 = new Mutation(row2);
-    Mutation mut3 = new Mutation(row3);
-
-    // And we'll put 4 columns in each row
-    Text col1 = new Text("1");
-    Text col2 = new Text("2");
-    Text col3 = new Text("3");
-    Text col4 = new Text("4");
-
-    // Now we'll add them to the mutations
-    mut1.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut1.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    mut2.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut2.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    mut3.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-    mut3.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
-
-    // Now we'll make a Batch Writer
-    bw = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
-
-    // And add the mutations
-    bw.addMutation(mut1);
-    bw.addMutation(mut2);
-    bw.addMutation(mut3);
-
-    // Force a send
-    bw.flush();
-
-    // Now lets look at the rows
-    Scanner rowThree = getRow(scanOpts, new Text("row3"));
-    Scanner rowTwo = getRow(scanOpts, new Text("row2"));
-    Scanner rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is everything");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Now lets delete rowTwo with the iterator
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    deleteRow(rowTwo);
-
-    // Now lets look at the rows again
-    rowThree = getRow(scanOpts, new Text("row3"));
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is row1 and row3");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Should only see the two rows
-    // Now lets delete rowOne without passing in the iterator
-
-    deleteRow(scanOpts, row1);
-
-    // Now lets look at the rows one last time
-    rowThree = getRow(scanOpts, new Text("row3"));
-    rowTwo = getRow(scanOpts, new Text("row2"));
-    rowOne = getRow(scanOpts, new Text("row1"));
-
-    // And print them
-    log.info("This is just row3");
-    printRow(rowOne);
-    printRow(rowTwo);
-    printRow(rowThree);
-    System.out.flush();
-
-    // Should only see rowThree
-
-    // Always close your batchwriter
-
-    bw.close();
-
-    // and lets clean up our mess
-    connector.tableOperations().delete(tableName);
-
-    // fin~
-
-  }
-
-  /**
-   * Deletes a row given a text object
-   */
-  private static void deleteRow(ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    deleteRow(getRow(scanOpts, row));
-  }
-
-  /**
-   * Deletes a row, given a Scanner of JUST that row
-   */
-  private static void deleteRow(Scanner scanner) throws MutationsRejectedException {
-    Mutation deleter = null;
-    // iterate through the keys
-    for (Entry<Key,Value> entry : scanner) {
-      // create a mutation for the row
-      if (deleter == null)
-        deleter = new Mutation(entry.getKey().getRow());
-      // putDelete adds the key with the delete flag set to true
-      deleter.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
-    }
-    bw.addMutation(deleter);
-    bw.flush();
-  }
-
-  /**
-   * Just a generic print function given an iterator. Not necessarily just for printing a single row
-   */
-  private static void printRow(Scanner scanner) {
-    // iterates through and prints
-    for (Entry<Key,Value> entry : scanner)
-      log.info("Key: " + entry.getKey().toString() + " Value: " + entry.getValue().toString());
-  }
-
-  /**
-   * Gets a scanner over one row
-   */
-  private static Scanner getRow(ScannerOpts scanOpts, Text row) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    // Create a scanner
-    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
-    scanner.setBatchSize(scanOpts.scanBatchSize);
-    // Say start key is the one with key of row
-    // and end key is the one that immediately follows the row
-    scanner.setRange(new Range(row));
-    return scanner;
-  }
-
-}
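
The row-deletion idiom here (scan the row, then putDelete each key) is the standard one, but the removed helper passed a null Mutation to addMutation when the scanner returned nothing. A sketch of the same idiom with that case guarded; the Scanner and BatchWriter are assumed to come from an existing Connector:

import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

public class DeleteRowSketch {
  // scanner must be confined to a single row, e.g. scanner.setRange(new Range(row))
  static void deleteRow(Scanner scanner, BatchWriter bw) throws Exception {
    Mutation deleter = null;
    for (Entry<Key,Value> entry : scanner) {
      if (deleter == null)
        deleter = new Mutation(entry.getKey().getRow());
      // putDelete adds the key with the delete flag set
      deleter.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
    }
    if (deleter != null) { // nothing to delete for an empty row
      bw.addMutation(deleter);
      bw.flush();
    }
  }
}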

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
deleted file mode 100644
index f2bd4d7..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/SequentialBatchWriter.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.ColumnVisibility;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Simple example for writing random data in sequential order to Accumulo. See docs/examples/README.batch for instructions.
- */
-public class SequentialBatchWriter {
-
-  static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--start")
-    long start = 0;
-    @Parameter(names = "--num", required = true)
-    long num = 0;
-    @Parameter(names = "--size", required = true, description = "size of the value to write")
-    int valueSize = 0;
-    @Parameter(names = "--vis", converter = VisibilityConverter.class)
-    ColumnVisibility vis = new ColumnVisibility();
-  }
-
-  /**
-   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
-   * The column family will be "foo" and the column qualifier will be "1". The values will be random byte arrays of a specified size.
-   */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
-    Connector connector = opts.getConnector();
-    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    long end = opts.start + opts.num;
-
-    for (long i = opts.start; i < end; i++) {
-      Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
-      bw.addMutation(m);
-    }
-
-    bw.close();
-  }
-}
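
Without the Opts/JCommander scaffolding, the sequential write boils down to a short loop. A sketch under the same layout assumptions (family "foo", qualifier "1", random values); the connector and table name are placeholders:

import java.util.Random;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;

public class SequentialWriteSketch {
  static void write(Connector conn, String table, long start, long num, int valueSize) throws Exception {
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    Random r = new Random();
    for (long i = start; i < start + num; i++) {
      byte[] value = new byte[valueSize];
      r.nextBytes(value);
      Mutation m = new Mutation(String.format("row_%010d", i));
      m.put("foo", "1", new ColumnVisibility(), new Value(value));
      bw.addMutation(m);
    }
    bw.close();
  }
}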

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
deleted file mode 100644
index 5885094..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TraceDumpExample.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.client;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.tracer.TraceDump;
-import org.apache.accumulo.tracer.TraceDump.Printer;
-import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Example of using the TraceDump class to print a formatted view of a Trace
- *
- */
-public class TraceDumpExample {
-  private static final Logger log = LoggerFactory.getLogger(TraceDumpExample.class);
-
-  static class Opts extends ClientOnDefaultTable {
-    public Opts() {
-      super("trace");
-    }
-
-    @Parameter(names = {"--traceid"}, description = "The hex string id of a given trace, for example 16cfbbd7beec4ae3")
-    public String traceId = "";
-  }
-
-  public void dump(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    if (opts.traceId.isEmpty()) {
-      throw new IllegalArgumentException("--traceid option is required");
-    }
-
-    final Connector conn = opts.getConnector();
-    final String principal = opts.getPrincipal();
-    final String table = opts.getTableName();
-    if (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
-      conn.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new RuntimeException(e);
-      }
-      while (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
-        log.info("{} didn't propagate read permission on {}", principal, table);
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-          throw new RuntimeException(e);
-        }
-      }
-    }
-    Scanner scanner = conn.createScanner(table, opts.auths);
-    scanner.setRange(new Range(new Text(opts.traceId)));
-    TraceDump.printTrace(scanner, new Printer() {
-      @Override
-      public void print(String line) {
-        System.out.println(line);
-      }
-    });
-  }
-
-  public static void main(String[] args) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-    TraceDumpExample traceDumpExample = new TraceDumpExample();
-    Opts opts = new Opts();
-    ScannerOpts scannerOpts = new ScannerOpts();
-    opts.parseArgs(TraceDumpExample.class.getName(), args, scannerOpts);
-
-    traceDumpExample.dump(opts);
-  }
-
-}
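
The grant-then-poll dance above is worth calling out on its own: table permission changes propagate asynchronously, so the example polls hasTablePermission until the grant becomes visible. A minimal sketch of just that step:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.security.TablePermission;

public class GrantReadSketch {
  static void ensureRead(Connector conn, String principal, String table) throws Exception {
    if (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
      conn.securityOperations().grantTablePermission(principal, table, TablePermission.READ);
      while (!conn.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
        Thread.sleep(1000); // permission changes propagate asynchronously
      }
    }
  }
}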

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
deleted file mode 100644
index 3ee0a27..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/TracingExample.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.examples.simple.client;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.ClientOnDefaultTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.DistributedTrace;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * A simple example showing how to use the distributed tracing API in client code
- *
- */
-public class TracingExample {
-  private static final Logger log = LoggerFactory.getLogger(TracingExample.class);
-  private static final String DEFAULT_TABLE_NAME = "test";
-
-  static class Opts extends ClientOnDefaultTable {
-    @Parameter(names = {"-C", "--createtable"}, description = "create table before doing anything")
-    boolean createtable = false;
-    @Parameter(names = {"-D", "--deletetable"}, description = "delete table when finished")
-    boolean deletetable = false;
-    @Parameter(names = {"-c", "--create"}, description = "create entries before any deletes")
-    boolean createEntries = false;
-    @Parameter(names = {"-r", "--read"}, description = "read entries after any creates/deletes")
-    boolean readEntries = false;
-
-    public Opts() {
-      super(DEFAULT_TABLE_NAME);
-      auths = new Authorizations();
-    }
-  }
-
-  public void enableTracing(Opts opts) throws Exception {
-    DistributedTrace.enable("myHost", "myApp");
-  }
-
-  public void execute(Opts opts) throws TableNotFoundException, InterruptedException, AccumuloException, AccumuloSecurityException, TableExistsException {
-
-    if (opts.createtable) {
-      opts.getConnector().tableOperations().create(opts.getTableName());
-    }
-
-    if (opts.createEntries) {
-      createEntries(opts);
-    }
-
-    if (opts.readEntries) {
-      readEntries(opts);
-    }
-
-    if (opts.deletetable) {
-      opts.getConnector().tableOperations().delete(opts.getTableName());
-    }
-  }
-
-  private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    // Trace the write operation. Note that unless you flush the BatchWriter, you will not capture
-    // the write operation, as it occurs asynchronously. You can optionally create additional Spans
-    // within a given Trace, as seen below around the flush.
-    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
-
-    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
-    BatchWriter batchWriter = opts.getConnector().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
-
-    Mutation m = new Mutation("row");
-    m.put("cf", "cq", "value");
-
-    batchWriter.addMutation(m);
-    // You can add timeline annotations to Spans, which can be viewed in the Monitor
-    scope.getSpan().addTimelineAnnotation("Initiating Flush");
-    batchWriter.flush();
-
-    batchWriter.close();
-    scope.close();
-  }
-
-  private void readEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
-
-    Scanner scanner = opts.getConnector().createScanner(opts.getTableName(), opts.auths);
-
-    // Trace the read operation.
-    TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
-    System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));
-
-    int numberOfEntriesRead = 0;
-    for (Entry<Key,Value> entry : scanner) {
-      System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
-      ++numberOfEntriesRead;
-    }
-    // You can add additional metadata (key/value pairs) to Spans, which can be viewed in the Monitor
-    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8), String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
-
-    readScope.close();
-  }
-
-  public static void main(String[] args) throws Exception {
-    try {
-      TracingExample tracingExample = new TracingExample();
-      Opts opts = new Opts();
-      ScannerOpts scannerOpts = new ScannerOpts();
-      opts.parseArgs(TracingExample.class.getName(), args, scannerOpts);
-
-      tracingExample.enableTracing(opts);
-      tracingExample.execute(opts);
-    } catch (Exception e) {
-      log.error("Caught exception running TraceExample", e);
-      System.exit(1);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
deleted file mode 100644
index 7dad89c..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/combiner/StatsCombiner.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.combiner;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.iterators.IteratorEnvironment;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-
-/**
- * This combiner calculates the max, min, sum, and count of long integers represented as strings in values. It stores the result in a comma-separated value of
- * the form min,max,sum,count. If such a value is encountered while combining, its information is incorporated into the running calculations of min, max, sum,
- * and count. See {@link Combiner} for more information on which values are combined together. See docs/examples/README.combiner for instructions.
- */
-public class StatsCombiner extends Combiner {
-
-  public static final String RADIX_OPTION = "radix";
-
-  private int radix = 10;
-
-  @Override
-  public Value reduce(Key key, Iterator<Value> iter) {
-
-    long min = Long.MAX_VALUE;
-    long max = Long.MIN_VALUE;
-    long sum = 0;
-    long count = 0;
-
-    while (iter.hasNext()) {
-      String stats[] = iter.next().toString().split(",");
-
-      if (stats.length == 1) {
-        long val = Long.parseLong(stats[0], radix);
-        min = Math.min(val, min);
-        max = Math.max(val, max);
-        sum += val;
-        count += 1;
-      } else {
-        min = Math.min(Long.parseLong(stats[0], radix), min);
-        max = Math.max(Long.parseLong(stats[1], radix), max);
-        sum += Long.parseLong(stats[2], radix);
-        count += Long.parseLong(stats[3], radix);
-      }
-    }
-
-    String ret = Long.toString(min, radix) + "," + Long.toString(max, radix) + "," + Long.toString(sum, radix) + "," + Long.toString(count, radix);
-    return new Value(ret.getBytes());
-  }
-
-  @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
-    super.init(source, options, env);
-
-    if (options.containsKey(RADIX_OPTION))
-      radix = Integer.parseInt(options.get(RADIX_OPTION));
-    else
-      radix = 10;
-  }
-
-  @Override
-  public IteratorOptions describeOptions() {
-    IteratorOptions io = super.describeOptions();
-    io.setName("statsCombiner");
-    io.setDescription("Combiner that keeps track of min, max, sum, and count");
-    io.addNamedOption(RADIX_OPTION, "radix/base of the numbers");
-    return io;
-  }
-
-  @Override
-  public boolean validateOptions(Map<String,String> options) {
-    if (!super.validateOptions(options))
-      return false;
-
-    if (options.containsKey(RADIX_OPTION) && !options.get(RADIX_OPTION).matches("\\d+"))
-      throw new IllegalArgumentException("invalid option " + RADIX_OPTION + ":" + options.get(RADIX_OPTION));
-
-    return true;
-  }
-
-  /**
-   * A convenience method for setting the expected base/radix of the numbers
-   *
-   * @param iterConfig
-   *          Iterator settings to configure
-   * @param base
-   *          The expected base/radix of the numbers.
-   */
-  public static void setRadix(IteratorSetting iterConfig, int base) {
-    iterConfig.addOption(RADIX_OPTION, base + "");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
deleted file mode 100644
index 14e3c8e..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/AlphaNumKeyConstraint.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * This class is an Accumulo constraint that ensures all fields of a key are alphanumeric.
- *
- * See docs/examples/README.constraint for instructions.
- *
- */
-
-public class AlphaNumKeyConstraint implements Constraint {
-
-  static final short NON_ALPHA_NUM_ROW = 1;
-  static final short NON_ALPHA_NUM_COLF = 2;
-  static final short NON_ALPHA_NUM_COLQ = 3;
-
-  static final String ROW_VIOLATION_MESSAGE = "Row was not alpha numeric";
-  static final String COLF_VIOLATION_MESSAGE = "Column family was not alpha numeric";
-  static final String COLQ_VIOLATION_MESSAGE = "Column qualifier was not alpha numeric";
-
-  private boolean isAlphaNum(byte bytes[]) {
-    for (byte b : bytes) {
-      boolean ok = ((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9'));
-      if (!ok)
-        return false;
-    }
-
-    return true;
-  }
-
-  private Set<Short> addViolation(Set<Short> violations, short violation) {
-    if (violations == null) {
-      violations = new LinkedHashSet<>();
-      violations.add(violation);
-    } else if (!violations.contains(violation)) {
-      violations.add(violation);
-    }
-    return violations;
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    Set<Short> violations = null;
-
-    if (!isAlphaNum(mutation.getRow()))
-      violations = addViolation(violations, NON_ALPHA_NUM_ROW);
-
-    Collection<ColumnUpdate> updates = mutation.getUpdates();
-    for (ColumnUpdate columnUpdate : updates) {
-      if (!isAlphaNum(columnUpdate.getColumnFamily()))
-        violations = addViolation(violations, NON_ALPHA_NUM_COLF);
-
-      if (!isAlphaNum(columnUpdate.getColumnQualifier()))
-        violations = addViolation(violations, NON_ALPHA_NUM_COLQ);
-    }
-
-    return null == violations ? null : new ArrayList<>(violations);
-  }
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-
-    switch (violationCode) {
-      case NON_ALPHA_NUM_ROW:
-        return ROW_VIOLATION_MESSAGE;
-      case NON_ALPHA_NUM_COLF:
-        return COLF_VIOLATION_MESSAGE;
-      case NON_ALPHA_NUM_COLQ:
-        return COLQ_VIOLATION_MESSAGE;
-    }
-
-    return null;
-  }
-
-}
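
Constraints are registered on a table by class name and then checked server-side on every write. A sketch of enabling this one, assuming the class is available to the tablet servers; the table name is a placeholder:

import org.apache.accumulo.core.client.Connector;

public class AddConstraintSketch {
  static void add(Connector conn, String table) throws Exception {
    int id = conn.tableOperations().addConstraint(table,
        "org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint");
    System.out.println("constraint registered with id " + id);
  }
}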

http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
deleted file mode 100644
index 3d94861..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/MaxMutationSize.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * Ensure that mutations are a reasonable size: we must be able to fit several in memory at a time.
- */
-public class MaxMutationSize implements Constraint {
-  static final long MAX_SIZE = Runtime.getRuntime().maxMemory() >> 8;
-  static final List<Short> empty = Collections.emptyList();
-  static final List<Short> violations = Collections.singletonList(Short.valueOf((short) 0));
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-    return String.format("mutation exceeded maximum size of %d", MAX_SIZE);
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    if (mutation.estimatedMemoryUsed() < MAX_SIZE)
-      return empty;
-    return violations;
-  }
-}

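MAX_SIZE above works out to 1/256th of the JVM's maximum heap (roughly 4 MB on a 1 GB heap), which is what "fit several in memory at a time" means in practice. A standalone sketch of the same check, using the same Mutation accounting the constraint consults:

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

public class MutationSizeSketch {
  public static void main(String[] args) {
    // Same bound as the constraint: one 256th of the maximum heap.
    long maxSize = Runtime.getRuntime().maxMemory() >> 8;

    Mutation m = new Mutation("row1");
    m.put("colf", "colq", new Value(new byte[1024]));

    // estimatedMemoryUsed() is the figure check() compares against MAX_SIZE.
    System.out.printf("mutation ~%d bytes, limit %d bytes, accepted=%b%n",
        m.estimatedMemoryUsed(), maxSize, m.estimatedMemoryUsed() < maxSize);
  }
}
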
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
deleted file mode 100644
index 2b22e6b..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/constraints/NumericValueConstraint.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.constraints;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.core.constraints.Constraint;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Mutation;
-
-/**
- * This class is an Accumulo constraint that ensures values are numeric strings. See docs/examples/README.constraint for instructions.
- */
-public class NumericValueConstraint implements Constraint {
-
-  static final short NON_NUMERIC_VALUE = 1;
-  static final String VIOLATION_MESSAGE = "Value is not numeric";
-
-  private static final List<Short> VIOLATION_LIST = Collections.unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
-
-  private boolean isNumeric(byte[] bytes) {
-    for (byte b : bytes) {
-      boolean ok = (b >= '0' && b <= '9');
-      if (!ok)
-        return false;
-    }
-
-    return true;
-  }
-
-  @Override
-  public List<Short> check(Environment env, Mutation mutation) {
-    Collection<ColumnUpdate> updates = mutation.getUpdates();
-
-    for (ColumnUpdate columnUpdate : updates) {
-      if (!isNumeric(columnUpdate.getValue()))
-        return VIOLATION_LIST;
-    }
-
-    return null;
-  }
-
-  @Override
-  public String getViolationDescription(short violationCode) {
-
-    switch (violationCode) {
-      case NON_NUMERIC_VALUE:
-        return VIOLATION_MESSAGE;
-    }
-
-    return null;
-  }
-
-}

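When a write violates NumericValueConstraint, the failure reaches the client as a MutationsRejectedException whose summaries carry the code and the description string returned by getViolationDescription(). A minimal sketch, assuming a Connector named conn and a table "testConstraints" with the constraint attached (both are placeholders from the earlier sketch):

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

try {
  BatchWriter bw = conn.createBatchWriter("testConstraints", new BatchWriterConfig());
  Mutation m = new Mutation("row1");
  m.put("colf", "colq", new Value("abc".getBytes()));  // non-numeric value, will be rejected
  bw.addMutation(m);
  bw.close();  // violations surface on flush/close
} catch (MutationsRejectedException e) {
  // One summary per (constraint class, violation code) pair.
  for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries())
    System.out.println(cvs.toString());
}
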
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
deleted file mode 100644
index 111fae0..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/FileCount.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOnRequiredTable;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Computes recursive counts over file system information and stores them back into the same Accumulo table. See docs/examples/README.dirlist for instructions.
- */
-public class FileCount {
-
-  private int entriesScanned;
-  private int inserts;
-
-  private Opts opts;
-  private ScannerOpts scanOpts;
-  private BatchWriterOpts bwOpts;
-
-  private static class CountValue {
-    int dirCount = 0;
-    int fileCount = 0;
-    int recursiveDirCount = 0;
-    int recursiveFileCount = 0;
-
-    void set(Value val) {
-      String[] sa = val.toString().split(",");
-      dirCount = Integer.parseInt(sa[0]);
-      fileCount = Integer.parseInt(sa[1]);
-      recursiveDirCount = Integer.parseInt(sa[2]);
-      recursiveFileCount = Integer.parseInt(sa[3]);
-    }
-
-    Value toValue() {
-      return new Value((dirCount + "," + fileCount + "," + recursiveDirCount + "," + recursiveFileCount).getBytes());
-    }
-
-    void incrementFiles() {
-      fileCount++;
-      recursiveFileCount++;
-    }
-
-    void incrementDirs() {
-      dirCount++;
-      recursiveDirCount++;
-    }
-
-    public void clear() {
-      dirCount = 0;
-      fileCount = 0;
-      recursiveDirCount = 0;
-      recursiveFileCount = 0;
-    }
-
-    public void incrementRecursive(CountValue other) {
-      recursiveDirCount += other.recursiveDirCount;
-      recursiveFileCount += other.recursiveFileCount;
-    }
-  }
-
-  private int findMaxDepth(Scanner scanner, int min, int max) {
-    int mid = min + (max - min) / 2;
-    return findMaxDepth(scanner, min, mid, max);
-  }
-
-  private int findMaxDepth(Scanner scanner, int min, int mid, int max) {
-    // check to see if the midpoint exists
-    if (max < min)
-      return -1;
-
-    scanner.setRange(new Range(String.format("%03d", mid), true, String.format("%03d", mid + 1), false));
-
-    if (scanner.iterator().hasNext()) {
-      // this depth exists; check to see if a larger depth exists
-      int ret = findMaxDepth(scanner, mid + 1, max);
-      if (ret == -1)
-        return mid; // this must be the max
-      else
-        return ret;
-    } else {
-      // this depth does not exist, look lower
-      return findMaxDepth(scanner, min, mid - 1);
-    }
-
-  }
-
-  private int findMaxDepth(Scanner scanner) {
-    // do binary search to find max depth
-    int origBatchSize = scanner.getBatchSize();
-    scanner.setBatchSize(100);
-    int depth = findMaxDepth(scanner, 0, 64, 999);
-    scanner.setBatchSize(origBatchSize);
-    return depth;
-  }
-
-  // find the count column and consume a row
-  private Entry<Key,Value> findCount(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator, CountValue cv) {
-
-    Key key = entry.getKey();
-    Text currentRow = key.getRow();
-
-    if (key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0)
-      cv.set(entry.getValue());
-
-    while (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-      key = entry.getKey();
-
-      if (key.compareRow(currentRow) != 0)
-        return entry;
-
-      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0 && key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0) {
-        cv.set(entry.getValue());
-      }
-
-    }
-
-    return null;
-  }
-
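-  // skip the rest of the current row, returning the first entry of the next row (or null)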
-  private Entry<Key,Value> consumeRow(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator) {
-    Key key = entry.getKey();
-    Text currentRow = key.getRow();
-
-    while (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-      key = entry.getKey();
-
-      if (key.compareRow(currentRow) != 0)
-        return entry;
-    }
-
-    return null;
-  }
-
-  private String extractDir(Key key) {
-    String row = key.getRowData().toString();
-    return row.substring(3, row.lastIndexOf('/'));
-  }
-
-  private Mutation createMutation(int depth, String dir, CountValue countVal) {
-    Mutation m = new Mutation(String.format("%03d%s", depth, dir));
-    m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, countVal.toValue());
-    return m;
-  }
-
-  private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter) throws Exception {
-
-    scanner.setRange(new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false));
-
-    CountValue countVal = new CountValue();
-
-    Iterator<Entry<Key,Value>> iterator = scanner.iterator();
-
-    String currentDir = null;
-
-    Entry<Key,Value> entry = null;
-    if (iterator.hasNext()) {
-      entry = iterator.next();
-      entriesScanned++;
-    }
-
-    while (entry != null) {
-      Key key = entry.getKey();
-
-      String dir = extractDir(key);
-
-      if (currentDir == null) {
-        currentDir = dir;
-      } else if (!currentDir.equals(dir)) {
-        batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
-        inserts++;
-        currentDir = dir;
-        countVal.clear();
-      }
-
-      // process a whole row
-      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0) {
-        CountValue tmpCount = new CountValue();
-        entry = findCount(entry, iterator, tmpCount);
-
-        if (tmpCount.dirCount == 0 && tmpCount.fileCount == 0) {
-          // in this case the higher depth will not insert anything if the
-          // dir has no children, so insert something here
-          Mutation m = new Mutation(key.getRow());
-          m.put(QueryUtil.DIR_COLF, QueryUtil.COUNTS_COLQ, opts.visibility, tmpCount.toValue());
-          batchWriter.addMutation(m);
-          inserts++;
-        }
-
-        countVal.incrementRecursive(tmpCount);
-        countVal.incrementDirs();
-      } else {
-        entry = consumeRow(entry, iterator);
-        countVal.incrementFiles();
-      }
-    }
-
-    if (currentDir != null) {
-      batchWriter.addMutation(createMutation(depth - 1, currentDir, countVal));
-      inserts++;
-    }
-  }
-
-  public FileCount(Opts opts, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
-    this.opts = opts;
-    this.scanOpts = scanOpts;
-    this.bwOpts = bwOpts;
-  }
-
-  public void run() throws Exception {
-
-    entriesScanned = 0;
-    inserts = 0;
-
-    Connector conn = opts.getConnector();
-    Scanner scanner = conn.createScanner(opts.getTableName(), opts.auths);
-    scanner.setBatchSize(scanOpts.scanBatchSize);
-    BatchWriter bw = conn.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
-
-    long t1 = System.currentTimeMillis();
-
-    int depth = findMaxDepth(scanner);
-
-    long t2 = System.currentTimeMillis();
-
-    for (int d = depth; d > 0; d--) {
-      calculateCounts(scanner, d, bw);
-      // must flush so the next depth can read what the previous depth wrote
-      bw.flush();
-    }
-
-    bw.close();
-
-    long t3 = System.currentTimeMillis();
-
-    System.out.printf("Max depth              : %d%n", depth);
-    System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
-    System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
-    System.out.printf("Entries scanned        : %,d %n", entriesScanned);
-    System.out.printf("Counts inserted        : %,d %n", inserts);
-  }
-
-  public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    ScannerOpts scanOpts = new ScannerOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    String programName = FileCount.class.getName();
-    opts.parseArgs(programName, args, scanOpts, bwOpts);
-
-    FileCount fileCount = new FileCount(opts, scanOpts, bwOpts);
-    fileCount.run();
-  }
-}

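FileCount relies on the dirlist row scheme: every row key is a three-digit, zero-padded depth followed by the path, so all entries at one depth form a contiguous range (e.g. ["003", "004")) that findMaxDepth can binary-search and calculateCounts can scan in a single pass. A hypothetical helper illustrating the encoding (the real mapping lives in QueryUtil.getRow):

public class DepthRowSketch {
  // Mirrors the "%03d%s" scheme the scans above assume; illustrative only.
  static String rowAtDepth(String path) {
    int depth = path.equals("/") ? 0 : path.split("/").length - 1;
    return String.format("%03d%s", depth, path.equals("/") ? "" : path);
  }

  public static void main(String[] args) {
    System.out.println(rowAtDepth("/"));       // 000
    System.out.println(rowAtDepth("/a"));      // 001/a
    System.out.println(rowAtDepth("/a/b/c"));  // 003/a/b/c
  }
}
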
http://git-wip-us.apache.org/repos/asf/accumulo/blob/8e0f19a1/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
----------------------------------------------------------------------
diff --git a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
deleted file mode 100644
index c0808fe..0000000
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/dirlist/Ingest.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.examples.simple.dirlist;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.LongCombiner;
-import org.apache.accumulo.core.iterators.TypedValueCombiner.Encoder;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.examples.simple.filedata.ChunkCombiner;
-import org.apache.accumulo.examples.simple.filedata.FileDataIngest;
-import org.apache.hadoop.io.Text;
-
-import com.beust.jcommander.Parameter;
-
-/**
- * Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a
- * separate table, and stores the file data in a third table. See docs/examples/README.dirlist for instructions.
- */
-public class Ingest {
-  static final Value nullValue = new Value(new byte[0]);
-  public static final String LENGTH_CQ = "length";
-  public static final String HIDDEN_CQ = "hidden";
-  public static final String EXEC_CQ = "exec";
-  public static final String LASTMOD_CQ = "lastmod";
-  public static final String HASH_CQ = "md5";
-  public static final Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
-
-  public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir, boolean isHidden, boolean canExec, long length, long lastmod,
-      String hash) {
-    if (path.equals("/"))
-      path = "";
-    Mutation m = new Mutation(QueryUtil.getRow(path));
-    Text colf = null;
-    if (isDir)
-      colf = QueryUtil.DIR_COLF;
-    else
-      colf = new Text(encoder.encode(Long.MAX_VALUE - lastmod));
-    m.put(colf, new Text(LENGTH_CQ), cv, new Value(Long.toString(length).getBytes()));
-    m.put(colf, new Text(HIDDEN_CQ), cv, new Value(Boolean.toString(isHidden).getBytes()));
-    m.put(colf, new Text(EXEC_CQ), cv, new Value(Boolean.toString(canExec).getBytes()));
-    m.put(colf, new Text(LASTMOD_CQ), cv, new Value(Long.toString(lastmod).getBytes()));
-    if (hash != null && hash.length() > 0)
-      m.put(colf, new Text(HASH_CQ), cv, new Value(hash.getBytes()));
-    return m;
-  }
-
-  private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
-    // build main table entry
-    String path = null;
-    try {
-      path = src.getCanonicalPath();
-    } catch (IOException e) {
-      path = src.getAbsolutePath();
-    }
-    System.out.println(path);
-
-    String hash = null;
-    if (!src.isDirectory()) {
-      try {
-        hash = fdi.insertFileData(path, data);
-      } catch (Exception e) {
-        // if something goes wrong, just skip this one
-        return;
-      }
-    }
-
-    dirBW.addMutation(buildMutation(cv, path, src.isDirectory(), src.isHidden(), src.canExecute(), src.length(), src.lastModified(), hash));
-
-    // build index table entries
-    Text row = QueryUtil.getForwardIndex(path);
-    if (row != null) {
-      Text p = new Text(QueryUtil.getRow(path));
-      Mutation m = new Mutation(row);
-      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
-      indexBW.addMutation(m);
-
-      row = QueryUtil.getReverseIndex(path);
-      m = new Mutation(row);
-      m.put(QueryUtil.INDEX_COLF, p, cv, nullValue);
-      indexBW.addMutation(m);
-    }
-  }
-
-  private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
-    // ingest this File
-    ingest(src, cv, dirBW, indexBW, fdi, data);
-    // recurse into subdirectories
-    if (src.isDirectory()) {
-      File[] files = src.listFiles();
-      if (files == null)
-        return;
-      for (File child : files) {
-        recurse(child, cv, dirBW, indexBW, fdi, data);
-      }
-    }
-  }
-
-  static class Opts extends ClientOpts {
-    @Parameter(names = "--dirTable", description = "a table to hold the directory information")
-    String nameTable = "dirTable";
-    @Parameter(names = "--indexTable", description = "an index over the ingested data")
-    String indexTable = "indexTable";
-    @Parameter(names = "--dataTable", description = "the file data, chunked into parts")
-    String dataTable = "dataTable";
-    @Parameter(names = "--vis", description = "the visibility to mark the data", converter = VisibilityConverter.class)
-    ColumnVisibility visibility = new ColumnVisibility();
-    @Parameter(names = "--chunkSize", description = "the size of chunks when breaking down files")
-    int chunkSize = 100000;
-    @Parameter(description = "<dir> { <dir> ... }")
-    List<String> directories = new ArrayList<>();
-  }
-
-  public static void main(String[] args) throws Exception {
-    Opts opts = new Opts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
-
-    Connector conn = opts.getConnector();
-    if (!conn.tableOperations().exists(opts.nameTable))
-      conn.tableOperations().create(opts.nameTable);
-    if (!conn.tableOperations().exists(opts.indexTable))
-      conn.tableOperations().create(opts.indexTable);
-    if (!conn.tableOperations().exists(opts.dataTable)) {
-      conn.tableOperations().create(opts.dataTable);
-      conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
-    }
-
-    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
-    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
-    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
-    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
-    for (String dir : opts.directories) {
-      recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
-
-      // fill in parent directory info
-      int slashIndex = -1;
-      while ((slashIndex = dir.lastIndexOf("/")) > 0) {
-        dir = dir.substring(0, slashIndex);
-        ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
-      }
-    }
-    ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
-
-    dirBW.close();
-    indexBW.close();
-    dataBW.close();
-  }
-}
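
One non-obvious detail in buildMutation above: for files, the column family is the fixed-length encoding of Long.MAX_VALUE - lastmod, so the most recently modified version of a path sorts first within its row. A small sketch of why that works, reusing the encoder the class already imports:

import org.apache.accumulo.core.iterators.LongCombiner;

public class ReverseTimestampSketch {
  public static void main(String[] args) {
    long older = 1000L, newer = 2000L;
    byte[] a = LongCombiner.FIXED_LEN_ENCODER.encode(Long.MAX_VALUE - older);
    byte[] b = LongCombiner.FIXED_LEN_ENCODER.encode(Long.MAX_VALUE - newer);
    // The fixed-length encoding is 8 bytes, big-endian, so for these
    // non-negative values lexicographic byte order equals numeric order:
    // the newer timestamp yields the smaller key and sorts first.
    System.out.println("newer sorts first: " + (compareUnsigned(b, a) < 0));
  }

  static int compareUnsigned(byte[] x, byte[] y) {
    for (int i = 0; i < x.length; i++) {
      int c = (x[i] & 0xff) - (y[i] & 0xff);
      if (c != 0)
        return c;
    }
    return 0;
  }
}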