Posted to commits@accumulo.apache.org by ct...@apache.org on 2019/04/12 01:45:16 UTC

[accumulo-website] 02/02: Jekyll build from master:4ad5b56

This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/accumulo-website.git

commit c627ec700d68289c9da99b428151fac3e6420089
Author: Christopher Tubbs <ct...@apache.org>
AuthorDate: Thu Apr 11 21:43:17 2019 -0400

    Jekyll build from master:4ad5b56
    
    Update 1.8/1.9 examples
    
    * Redirect to identical examples
    * Fix broken links to unrendered markdown files by adding front-matter
    * Update example index to include two previously unreferenced examples
    * Update two previously unreferenced examples to have a correct title
---
 1.8/examples/batch.html                            |  11 +
 1.8/examples/batch.md                              |  55 ---
 1.8/examples/bloom.html                            |  11 +
 1.8/examples/bloom.md                              | 219 ------------
 1.8/examples/bulkIngest.html                       |  11 +
 1.8/examples/bulkIngest.md                         |  33 --
 1.8/examples/classpath.html                        |  11 +
 1.8/examples/classpath.md                          |  68 ----
 1.8/examples/client.html                           |  11 +
 1.8/examples/client.md                             |  79 -----
 1.8/examples/combiner.html                         |  11 +
 1.8/examples/combiner.md                           |  70 ----
 1.8/examples/constraints.html                      |  11 +
 1.8/examples/constraints.md                        |  54 ---
 1.8/examples/dirlist.html                          |  11 +
 1.8/examples/dirlist.md                            | 114 -------
 1.8/examples/export.html                           |  11 +
 1.8/examples/export.md                             |  91 -----
 1.8/examples/filedata.html                         |  11 +
 1.8/examples/filedata.md                           |  47 ---
 1.8/examples/filter.html                           |  11 +
 1.8/examples/filter.md                             | 110 ------
 1.8/examples/helloworld.html                       |  11 +
 1.8/examples/helloworld.md                         |  47 ---
 1.8/examples/index.html                            |   4 +
 1.8/examples/isolation.html                        |  11 +
 1.8/examples/isolation.md                          |  50 ---
 1.8/examples/mapred.html                           |  11 +
 1.8/examples/mapred.md                             | 154 ---------
 1.8/examples/maxmutation.html                      |  11 +
 1.8/examples/maxmutation.md                        |  49 ---
 1.8/examples/regex.html                            |  11 +
 1.8/examples/regex.md                              |  57 ----
 1.8/examples/reservations.html                     |  11 +
 1.8/examples/reservations.md                       |  66 ----
 1.8/examples/rgbalancer.html                       |  11 +
 1.8/examples/rgbalancer.md                         | 159 ---------
 1.8/examples/rowhash.html                          |  11 +
 1.8/examples/rowhash.md                            |  59 ----
 1.8/examples/sample.html                           |  11 +
 1.8/examples/sample.md                             | 192 -----------
 1.8/examples/shard.html                            |  11 +
 1.8/examples/shard.md                              |  67 ----
 1.8/examples/tabletofile.html                      |  11 +
 1.8/examples/tabletofile.md                        |  59 ----
 1.8/examples/terasort.html                         |  11 +
 1.8/examples/terasort.md                           |  50 ---
 1.8/examples/visibility.html                       |  11 +
 1.8/examples/visibility.md                         | 131 -------
 1.8/examples/index.html => 1.9/examples/batch.html | 105 ++----
 1.9/examples/batch.md                              |  55 ---
 1.9/examples/bloom.html                            | 378 +++++++++++++++++++++
 1.9/examples/bloom.md                              | 219 ------------
 .../index.html => 1.9/examples/bulkIngest.html     | 102 +-----
 1.9/examples/bulkIngest.md                         |  33 --
 .../index.html => 1.9/examples/classpath.html      | 110 +++---
 1.9/examples/classpath.md                          |  68 ----
 1.9/examples/{index.html => client.html}           | 150 ++++----
 1.9/examples/client.md                             |  79 -----
 .../index.html => 1.9/examples/combiner.html       | 141 ++++----
 1.9/examples/combiner.md                           |  70 ----
 .../index.html => 1.9/examples/constraints.html    | 123 +++----
 1.9/examples/constraints.md                        |  54 ---
 1.9/examples/dirlist.html                          | 279 +++++++++++++++
 1.9/examples/dirlist.md                            | 114 -------
 .../index.html => 1.9/examples/export.html         | 160 +++++----
 1.9/examples/export.md                             |  91 -----
 .../index.html => 1.9/examples/filedata.html       | 109 ++----
 1.9/examples/filedata.md                           |  47 ---
 1.9/examples/{index.html => filter.html}           | 182 +++++-----
 1.9/examples/filter.md                             | 110 ------
 .../index.html => 1.9/examples/helloworld.html     |  97 ++----
 1.9/examples/helloworld.md                         |  47 ---
 1.9/examples/index.html                            |   4 +
 .../index.html => 1.9/examples/isolation.html      | 116 ++-----
 1.9/examples/isolation.md                          |  50 ---
 1.9/examples/{index.html => mapred.html}           | 221 +++++++-----
 1.9/examples/mapred.md                             | 154 ---------
 .../index.html => 1.9/examples/maxmutation.html    | 116 ++-----
 1.9/examples/maxmutation.md                        |  49 ---
 1.8/examples/index.html => 1.9/examples/regex.html | 125 +++----
 1.9/examples/regex.md                              |  57 ----
 .../index.html => 1.9/examples/reservations.html   | 134 +++-----
 1.9/examples/reservations.md                       |  66 ----
 1.9/examples/{index.html => rgbalancer.html}       | 233 ++++++++-----
 1.9/examples/rgbalancer.md                         | 159 ---------
 .../index.html => 1.9/examples/rowhash.html        | 128 +++----
 1.9/examples/rowhash.md                            |  59 ----
 1.9/examples/sample.html                           | 347 +++++++++++++++++++
 1.9/examples/sample.md                             | 192 -----------
 1.8/examples/index.html => 1.9/examples/shard.html | 142 ++++----
 1.9/examples/shard.md                              |  67 ----
 .../index.html => 1.9/examples/tabletofile.html    | 128 +++----
 1.9/examples/tabletofile.md                        |  59 ----
 .../index.html => 1.9/examples/terasort.html       | 120 ++-----
 1.9/examples/terasort.md                           |  50 ---
 1.9/examples/{index.html => visibility.html}       | 205 ++++++-----
 1.9/examples/visibility.md                         | 131 -------
 feed.xml                                           |   4 +-
 redirects.json                                     |   2 +-
 100 files changed, 2523 insertions(+), 5866 deletions(-)

diff --git a/1.8/examples/batch.html b/1.8/examples/batch.html
new file mode 100644
index 0000000..85b03bb
--- /dev/null
+++ b/1.8/examples/batch.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/batch.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/batch.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/batch.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/batch.html"</script>
+</html>
diff --git a/1.8/examples/batch.md b/1.8/examples/batch.md
deleted file mode 100644
index 463481b..0000000
--- a/1.8/examples/batch.md
+++ /dev/null
@@ -1,55 +0,0 @@
-Title: Apache Accumulo Batch Writing and Scanning Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * SequentialBatchWriter.java - writes mutations with sequential rows and random values
- * RandomBatchWriter.java - used by SequentialBatchWriter to generate random values
- * RandomBatchScanner.java - reads random rows and verifies their values
-
-This is an example of how to use the batch writer and batch scanner. To compile
-the example, run Maven and copy the produced jar into the Accumulo lib dir.
-This is already done in the tar distribution.
-
-Below are commands that add 10000 entries to Accumulo and then do 100 random
-queries. The write command generates random 50-byte values.
-
-Be sure to use the name of your instance (given as instance here) and the appropriate
-list of zookeeper nodes (given as zookeepers here).
-
-Before you run this, you must ensure that the user you are running as has the
-"exampleVis" authorization. (You can set this in the shell with "setauths -u username -s exampleVis".)
-
-    $ ./bin/accumulo shell -u root -e "setauths -u username -s exampleVis"
-
-You must also create the table, batchtest1, ahead of time. (In the shell, use "createtable batchtest1")
-
-    $ ./bin/accumulo shell -u username -e "createtable batchtest1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance -z zookeepers -u username -p password -t batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -i instance -z zookeepers -u username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --auths exampleVis
-    07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 lookups/sec   0.14 secs
-
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : num results : 100
-
-    07 11:33:11,364 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,370 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : 2173.91 lookups/sec   0.05 secs
-
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : num results : 100
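-
-The write and scan pattern driven by the commands above can also be sketched
-directly against the client API. The snippet below is a minimal illustration,
-not part of the example code; the instance name, zookeepers, credentials, and
-values are placeholders.
-
-    import java.util.Collections;
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.BatchScanner;
-    import org.apache.accumulo.core.client.BatchWriter;
-    import org.apache.accumulo.core.client.BatchWriterConfig;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.ZooKeeperInstance;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-    import org.apache.accumulo.core.security.ColumnVisibility;
-
-    public class BatchSketch {
-      public static void main(String[] args) throws Exception {
-        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
-            .getConnector("username", new PasswordToken("password"));
-
-        // write sequential rows, as SequentialBatchWriter does
-        BatchWriter bw = conn.createBatchWriter("batchtest1", new BatchWriterConfig());
-        for (int i = 0; i < 10000; i++) {
-          Mutation m = new Mutation(String.format("row_%010d", i));
-          m.put("foo", "1", new ColumnVisibility("exampleVis"), "value_" + i);
-          bw.addMutation(m);
-        }
-        bw.close();
-
-        // read the rows back with 20 scan threads
-        BatchScanner bs = conn.createBatchScanner("batchtest1",
-            new Authorizations("exampleVis"), 20);
-        bs.setRanges(Collections.singleton(new Range("row_0000000000", "row_0000009999")));
-        for (Entry<Key,Value> entry : bs)
-          System.out.println(entry.getKey() + " -> " + entry.getValue());
-        bs.close();
-      }
-    }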
diff --git a/1.8/examples/bloom.html b/1.8/examples/bloom.html
new file mode 100644
index 0000000..0a77741
--- /dev/null
+++ b/1.8/examples/bloom.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/bloom.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/bloom.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/bloom.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/bloom.html"</script>
+</html>
diff --git a/1.8/examples/bloom.md b/1.8/examples/bloom.md
deleted file mode 100644
index 555f06d..0000000
--- a/1.8/examples/bloom.md
+++ /dev/null
@@ -1,219 +0,0 @@
-Title: Apache Accumulo Bloom Filter Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows how to create a table with bloom filters enabled.  It also
-shows how bloom filters increase query performance when looking for values that
-do not exist in a table.
-
-Below, a table named bloom_test is created and bloom filters are enabled.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test
-    username@instance bloom_test> config -t bloom_test -s table.bloom.enabled=true
-    username@instance bloom_test> exit
-
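-The same property can also be set through the client API. A minimal sketch
-(the connection details are placeholders, not part of the original example):
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.ZooKeeperInstance;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-
-    public class EnableBloom {
-      public static void main(String[] args) throws Exception {
-        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
-            .getConnector("username", new PasswordToken("password"));
-        conn.tableOperations().create("bloom_test");
-        // equivalent of "config -t bloom_test -s table.bloom.enabled=true"
-        conn.tableOperations().setProperty("bloom_test", "table.bloom.enabled", "true");
-      }
-    }
-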
-Below, 1 million random values are inserted into Accumulo. The randomly
-generated rows range between 0 and 1 billion. The random number generator is
-initialized with the seed 7.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
-
-Below, the table is flushed:
-
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test -w'
-    05 10:40:06,069 [shell.Shell] INFO : Flush of table bloom_test completed.
-
-After the flush completes, 500 random queries are done against the table. The
-same seed is used to generate the queries, so everything is found in the
-table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    96.19 lookups/sec   5.20 secs
-    num results : 500
-    Generating 500 random queries...finished
-    102.35 lookups/sec   4.89 secs
-    num results : 500
-
-Below, another 500 queries are performed using a different seed, which results
-in nothing being found. In this case the lookups are much faster because of
-the bloom filters.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 8 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    2212.39 lookups/sec   0.23 secs
-    num results : 0
-    Did not find 500 rows
-    Generating 500 random queries...finished
-    4464.29 lookups/sec   0.11 secs
-    num results : 0
-    Did not find 500 rows
-
-********************************************************************************
-
-Bloom filters can also speed up lookups for entries that exist. In Accumulo,
-data is divided into tablets and each tablet has multiple map files. Every
-lookup in Accumulo goes to a specific tablet where a lookup is done on each
-map file in the tablet. So if a tablet has three map files, lookup performance
-can be three times slower than a tablet with one map file. However, if the map
-files contain unique sets of data, then bloom filters can help eliminate map
-files that do not contain the row being looked up. To illustrate this, two
-identical tables were created using the following process. One table had bloom
-filters; the other did not. Also, the major compaction ratio was increased to
-prevent the files from being compacted into one file.
-
- * Insert 1 million entries using  RandomBatchWriter with a seed of 7
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 8
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 9
- * Flush the table using the shell
-
-After following the above steps, each table will have a tablet with three map
-files. Flushing the table after each batch of inserts will create a map file.
-Each map file will contain 1 million entries generated with a different seed.
-This is assuming that Accumulo is configured with enough memory to hold 1
-million inserts. If not, then more map files will be created.
-
-The commands for creating the first table without bloom filters are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test1
-    username@instance bloom_test1> config -t bloom_test1 -s table.compaction.major.ratio=7
-    username@instance bloom_test1> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-
-The commands for creating the second table with bloom filters are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test2
-    username@instance bloom_test2> config -t bloom_test2 -s table.compaction.major.ratio=7
-    username@instance bloom_test2> config -t bloom_test2 -s table.bloom.enabled=true
-    username@instance bloom_test2> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-
-Below, 500 lookups are done against the table without bloom filters, using
-random number generator seed 7. Even though only one map file will likely
-contain entries for this seed, all map files will be interrogated.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    35.09 lookups/sec  14.25 secs
-    num results : 500
-    Generating 500 random queries...finished
-    35.33 lookups/sec  14.15 secs
-    num results : 500
-
-Below, the same lookups are done against the table with bloom filters. The
-lookups were 2.86 times faster because only one map file was used, even though three
-map files existed.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    99.03 lookups/sec   5.05 secs
-    num results : 500
-    Generating 500 random queries...finished
-    101.15 lookups/sec   4.94 secs
-    num results : 500
-
-You can verify the table has three files by looking in HDFS. To look in HDFS
-you will need the table ID, because this is used in HDFS instead of the table
-name. The following command will show table IDs.
-
-    $ ./bin/accumulo shell -u username -p password -e 'tables -l'
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    bloom_test1          =>        o7
-    bloom_test2          =>        o8
-    trace                =>         1
-
-So the table ID for bloom_test2 is o8. The command below shows what files this
-table has in HDFS. This assumes Accumulo is at the default location in HDFS.
-
-    $ hadoop fs -lsr /accumulo/tables/o8
-    drwxr-xr-x   - username supergroup          0 2012-01-10 14:02 /accumulo/tables/o8/default_tablet
-    -rw-r--r--   3 username supergroup   52672650 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dj.rf
-    -rw-r--r--   3 username supergroup   52436176 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dk.rf
-    -rw-r--r--   3 username supergroup   52850173 2012-01-10 14:02 /accumulo/tables/o8/default_tablet/F00000dl.rf
-
-Running the rfile-info command shows that one of the files has a bloom filter,
-and that it is 1.5MB.
-
-    $ ./bin/accumulo rfile-info /accumulo/tables/o8/default_tablet/F00000dj.rf
-    Locality group         : <DEFAULT>
-	Start block          : 0
-	Num   blocks         : 752
-	Index level 0        : 43,598 bytes  1 blocks
-	First key            : row_0000001169 foo:1 [exampleVis] 1326222052539 false
-	Last key             : row_0999999421 foo:1 [exampleVis] 1326222052058 false
-	Num entries          : 999,536
-	Column families      : [foo]
-
-    Meta block     : BCFile.index
-      Raw size             : 4 bytes
-      Compressed size      : 12 bytes
-      Compression type     : gz
-
-    Meta block     : RFile.index
-      Raw size             : 43,696 bytes
-      Compressed size      : 15,592 bytes
-      Compression type     : gz
-
-    Meta block     : acu_bloom
-      Raw size             : 1,540,292 bytes
-      Compressed size      : 1,433,115 bytes
-      Compression type     : gz
-
diff --git a/1.8/examples/bulkIngest.html b/1.8/examples/bulkIngest.html
new file mode 100644
index 0000000..04d5271
--- /dev/null
+++ b/1.8/examples/bulkIngest.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/bulkIngest.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/bulkIngest.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/bulkIngest.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/bulkIngest.html"</script>
+</html>
diff --git a/1.8/examples/bulkIngest.md b/1.8/examples/bulkIngest.md
deleted file mode 100644
index e07dc9b..0000000
--- a/1.8/examples/bulkIngest.md
+++ /dev/null
@@ -1,33 +0,0 @@
-Title: Apache Accumulo Bulk Ingest Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This is an example of how to bulk ingest data into Accumulo using MapReduce.
-
-The following commands show how to run this example. This example creates a
-table called test_bulk which has two initial split points. Then 1000 rows of
-test data are created in HDFS. After that, the 1000 rows are ingested into
-Accumulo. Finally, we verify that the 1000 rows are in Accumulo.
-
-    $ PKG=org.apache.accumulo.examples.simple.mapreduce.bulk
-    $ ARGS="-i instance -z zookeepers -u username -p password"
-    $ ./bin/accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
-    $ ./bin/accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
-    $ ./bin/tool.sh lib/accumulo-examples-simple.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
-    $ ./bin/accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
-
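-Once the MapReduce job has written RFiles to HDFS, the final load step that
-BulkIngestExample performs boils down to a single client call. A minimal
-sketch (the paths and connection details are placeholders):
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.ZooKeeperInstance;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-
-    public class BulkLoad {
-      public static void main(String[] args) throws Exception {
-        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
-            .getConnector("username", new PasswordToken("password"));
-        // load the RFiles under the files dir; rejected files land in the failures dir
-        conn.tableOperations().importDirectory("test_bulk",
-            "tmp/bulkWork/files", "tmp/bulkWork/failures", false);
-      }
-    }
-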
-For a high-level discussion of bulk ingest, see the docs dir.
diff --git a/1.8/examples/classpath.html b/1.8/examples/classpath.html
new file mode 100644
index 0000000..d63a78b
--- /dev/null
+++ b/1.8/examples/classpath.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/classpath.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/classpath.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/classpath.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/classpath.html"</script>
+</html>
diff --git a/1.8/examples/classpath.md b/1.8/examples/classpath.md
deleted file mode 100644
index 710560f..0000000
--- a/1.8/examples/classpath.md
+++ /dev/null
@@ -1,68 +0,0 @@
-Title: Apache Accumulo Classpath Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-This example shows how to use per-table classpaths. The example leverages a
-test jar which contains a Filter that suppresses rows containing "foo". The
-example shows copying FooFilter.jar into HDFS and then making an Accumulo
-table reference that jar.
-
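-For context, a row-suppressing filter of this kind is only a few lines of
-code. The sketch below is illustrative and is not the exact class shipped in
-FooFilter.jar:
-
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.Filter;
-
-    public class FooFilter extends Filter {
-      // keep only key/value pairs whose row does not contain "foo"
-      @Override
-      public boolean accept(Key k, Value v) {
-        return !k.getRow().toString().contains("foo");
-      }
-    }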
-
-Execute the following command in the shell.
-
-    $ hadoop fs -copyFromLocal $ACCUMULO_HOME/test/src/test/resources/FooFilter.jar /user1/lib
-
-Execute the following in the Accumulo shell to set up the classpath context.
-
-    root@test15> config -s general.vfs.context.classpath.cx1=hdfs://<namenode host>:<namenode port>/user1/lib/[^.].*.jar
-
-Create a table
-
-    root@test15> createtable nofoo
-
-The following command makes this table use the configured classpath context
-
-    root@test15 nofoo> config -t nofoo -s table.classpath.context=cx1
-
-The following command configures an iterator that's in FooFilter.jar
-
-    root@test15 nofoo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-The commands below show the filter is working.
-
-    root@test15 nofoo> insert foo1 f1 q1 v1
-    root@test15 nofoo> insert noo1 f1 q1 v2
-    root@test15 nofoo> scan
-    noo1 f1:q1 []    v2
-    root@test15 nofoo>
-
-Below, an attempt is made to add the FooFilter to a table that's not configured
-to use the classpath context cx1. This fails until the table is configured to
-use cx1.
-
-    root@test15 nofoo> createtable nofootwo
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    2013-05-03 12:49:35,943 [shell.Shell] ERROR: java.lang.IllegalArgumentException: org.apache.accumulo.test.FooFilter
-    root@test15 nofootwo> config -t nofootwo -s table.classpath.context=cx1
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-
diff --git a/1.8/examples/client.html b/1.8/examples/client.html
new file mode 100644
index 0000000..8798c68
--- /dev/null
+++ b/1.8/examples/client.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/client.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/client.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/client.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/client.html"</script>
+</html>
diff --git a/1.8/examples/client.md b/1.8/examples/client.md
deleted file mode 100644
index f6b8bcb..0000000
--- a/1.8/examples/client.md
+++ /dev/null
@@ -1,79 +0,0 @@
-Title: Apache Accumulo Client Examples
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This documents how to run the simplest Java examples.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * Flush.java - flushes a table
- * RowOperations.java - reads and writes rows
- * ReadWriteExample.java - creates a table, writes to it, and reads from it
-
-Using the accumulo command, you can run the simple client examples by providing
-their class name and enough arguments to find your Accumulo instance. For
-example, the Flush class will flush a table:
-
-    $ PACKAGE=org.apache.accumulo.examples.simple.client
-    $ bin/accumulo $PACKAGE.Flush -u root -p mypassword -i instance -z zookeeper -t trace
-
-The very simple RowOperations class demonstrates how to read and write rows using the BatchWriter
-and Scanner:
-
-    $ bin/accumulo $PACKAGE.RowOperations -u root -p mypassword -i instance -z zookeeper
-    2013-01-14 14:45:24,738 [client.RowOperations] INFO : This is everything
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,756 [client.RowOperations] INFO : This is row1 and row3
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,765 [client.RowOperations] INFO : This is just row3
-    2013-01-14 14:45:24,769 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-
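-Reading a single row, roughly as RowOperations does, takes only a Scanner and
-a Range. A minimal sketch (the table name and connection details are
-placeholders):
-
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.client.ZooKeeperInstance;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    public class ScanOneRow {
-      public static void main(String[] args) throws Exception {
-        Connector conn = new ZooKeeperInstance("instance", "zookeeper")
-            .getConnector("root", new PasswordToken("mypassword"));
-        Scanner scanner = conn.createScanner("rowops", Authorizations.EMPTY);
-        scanner.setRange(Range.exact("row3")); // restrict the scan to one row
-        for (Entry<Key,Value> entry : scanner)
-          System.out.println("Key: " + entry.getKey() + " Value: " + entry.getValue());
-      }
-    }
-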
-To create a table, write to it, and read from it:
-
-    $ bin/accumulo $PACKAGE.ReadWriteExample -u root -p mypassword -i instance -z zookeeper --createtable --create --read
-    hello%00; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%01; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%02; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%03; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%04; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%05; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%06; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%07; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%08; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%09; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-
diff --git a/1.8/examples/combiner.html b/1.8/examples/combiner.html
new file mode 100644
index 0000000..ada8ce4
--- /dev/null
+++ b/1.8/examples/combiner.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/combiner.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/combiner.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/combiner.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/combiner.html"</script>
+</html>
diff --git a/1.8/examples/combiner.md b/1.8/examples/combiner.md
deleted file mode 100644
index f388e5b..0000000
--- a/1.8/examples/combiner.md
+++ /dev/null
@@ -1,70 +0,0 @@
-Title: Apache Accumulo Combiner Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java class, which can be found in org.apache.accumulo.examples.simple.combiner in the examples-simple module:
-
- * StatsCombiner.java - a combiner that calculates max, min, sum, and count
-
-This is a simple combiner example. To build this example, run Maven and then
-copy the produced jar into the Accumulo lib dir. This is already done in the
-tar distribution.
-
-    $ bin/accumulo shell -u username
-    Enter current password for 'username'@'instance': ***
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable runners
-    username@instance runners> setiter -t runners -p 10 -scan -minc -majc -n decStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: stat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 10
-    username@instance runners> setiter -t runners -p 11 -scan -minc -majc -n hexStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: hstat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 16
-    username@instance runners> insert 123456 name first Joe
-    username@instance runners> insert 123456 stat marathon 240
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    240,240,240,1
-    username@instance runners> insert 123456 stat marathon 230
-    username@instance runners> insert 123456 stat marathon 220
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-    username@instance runners> insert 123456 hstat virtualMarathon 6a
-    username@instance runners> insert 123456 hstat virtualMarathon 6b
-    username@instance runners> scan
-    123456 hstat:virtualMarathon []    6a,6b,d5,2
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-
-In this example a table is created and the example stats combiner is applied to
-the column families stat and hstat. The stats combiner computes min, max, sum,
-and count. It can be configured to use a different base or radix. In the example
-above, the column family stat is configured for base 10 and the column family
-hstat is configured for base 16.
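-
-The same iterator can be attached from Java instead of the shell. A minimal
-sketch (the connection details are placeholders):
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IteratorSetting;
-    import org.apache.accumulo.core.client.ZooKeeperInstance;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-
-    public class AttachStatsCombiner {
-      public static void main(String[] args) throws Exception {
-        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
-            .getConnector("username", new PasswordToken("password"));
-        IteratorSetting setting = new IteratorSetting(10, "decStats",
-            "org.apache.accumulo.examples.simple.combiner.StatsCombiner");
-        setting.addOption("columns", "stat"); // apply to the stat column family
-        setting.addOption("radix", "10");     // interpret values as base-10 numbers
-        conn.tableOperations().attachIterator("runners", setting);
-      }
-    }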
diff --git a/1.8/examples/constraints.html b/1.8/examples/constraints.html
new file mode 100644
index 0000000..762e2c2
--- /dev/null
+++ b/1.8/examples/constraints.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/constraints.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/constraints.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/constraints.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/constraints.html"</script>
+</html>
diff --git a/1.8/examples/constraints.md b/1.8/examples/constraints.md
deleted file mode 100644
index b15b409..0000000
--- a/1.8/examples/constraints.md
+++ /dev/null
@@ -1,54 +0,0 @@
-Title: Apache Accumulo Constraints Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.constraints in the examples-simple module:
-
- * AlphaNumKeyConstraint.java - a constraint that requires alphanumeric keys
- * NumericValueConstraint.java - a constraint that requires numeric string values
-
-This is an example of how to create a table with constraints. Below, a table is
-created with two example constraints. One constraint does not allow
-non-alphanumeric keys. The other constraint does not allow non-numeric values.
-Two inserts that violate these constraints are attempted and denied. The scan
-at the end shows the inserts were not allowed.
-
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable testConstraints
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint
-    username@instance testConstraints> insert r1 cf1 cq1 1111
-    username@instance testConstraints> insert r1 cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> insert r1! cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint, violationCode:1, violationDescription:Row was not alpha numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> scan
-    r1 cf1:cq1 []    1111
-    username@instance testConstraints>
-
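-A custom constraint is a small class. The sketch below illustrates the
-Constraint interface (it is not the exact shipped NumericValueConstraint); it
-rejects any mutation whose values contain non-digit bytes:
-
-    import java.util.Collections;
-    import java.util.List;
-    import org.apache.accumulo.core.constraints.Constraint;
-    import org.apache.accumulo.core.data.ColumnUpdate;
-    import org.apache.accumulo.core.data.Mutation;
-
-    public class DigitsOnlyValueConstraint implements Constraint {
-      @Override
-      public String getViolationDescription(short violationCode) {
-        return "Value is not numeric";
-      }
-
-      @Override
-      public List<Short> check(Environment env, Mutation mutation) {
-        for (ColumnUpdate update : mutation.getUpdates())
-          for (byte b : update.getValue())
-            if (b < '0' || b > '9')
-              return Collections.singletonList((short) 1);
-        return null; // no violations
-      }
-    }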
diff --git a/1.8/examples/dirlist.html b/1.8/examples/dirlist.html
new file mode 100644
index 0000000..b8666f8
--- /dev/null
+++ b/1.8/examples/dirlist.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/dirlist.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/dirlist.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/dirlist.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/dirlist.html"</script>
+</html>
diff --git a/1.8/examples/dirlist.md b/1.8/examples/dirlist.md
deleted file mode 100644
index 50623c6..0000000
--- a/1.8/examples/dirlist.md
+++ /dev/null
@@ -1,114 +0,0 @@
-Title: Apache Accumulo File System Archive
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example stores filesystem information in Accumulo, in the following three tables. More information about the table structures can be found at the end of this document.
-
- * directory table : This table stores information about the filesystem directory structure.
- * index table     : This table stores a file name index. It can be used to quickly find files with a given name, suffix, or prefix.
- * data table      : This table stores the file data. Files with duplicate data are only stored once.
-
-This example shows how to use Accumulo to store a file system history. It has the following classes:
-
- * Ingest.java - Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a separate table, and the file data into a third table.
- * QueryUtil.java - Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory names.
- * Viewer.java - Provides a GUI for browsing the file system information stored in Accumulo.
- * FileCount.java - Computes recursive counts over file system information and stores them back into the same Accumulo table.
-
-To begin, ingest some data with Ingest.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest -i instance -z zookeepers -u username -p password --vis exampleVis --chunkSize 100000 /local/username/workspace
-
-This may take some time if there are large files in the /local/username/workspace directory. If you use 0 instead of 100000 on the command line, the ingest will run much faster, but it will not put any file data into Accumulo (the dataTable will be empty).
-Note that running this example will create tables dirTable, indexTable, and dataTable in Accumulo that you should delete when you have completed the example.
-If you modify a file or add new files in the directory ingested (e.g. /local/username/workspace), you can run Ingest again to add new information into the Accumulo tables.
-
-To browse the data ingested, use Viewer.java. Be sure to give the "username" user the authorizations to see the data; in this case, run
-
-    $ ./bin/accumulo shell -u root -e 'setauths -u username -s exampleVis'
-
-then run the Viewer:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer -i instance -z zookeepers -u username -p password -t dirTable --dataTable dataTable --auths exampleVis --path /local/username/workspace
-
-To list the contents of specific directories, use QueryUtil.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username/workspace
-
-To perform searches on file or directory names, also use QueryUtil.java. Search terms must contain no more than one wildcard and cannot contain "/".
-*Note* that these queries run on the _indexTable_ table instead of the dirTable table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path filename --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path '*jar' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*jar' --search
-
-To count the number of direct children (directories and files) and descendants (children and children's descendants, directories and files), run FileCount over the dirTable table.
-The results are written back to the same table. FileCount reads from and writes to Accumulo. This requires scan authorizations for the read and a visibility for the data written.
-In this example, the authorizations and visibility are set to the same value, exampleVis. See README.visibility for more information on visibility and authorizations.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.FileCount -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis
-
-## Directory Table
-
-Here is an illustration of what data looks like in the directory table:
-
-    row colf:colq [vis]	value
-    000 dir:exec [exampleVis]    true
-    000 dir:hidden [exampleVis]    false
-    000 dir:lastmod [exampleVis]    1291996886000
-    000 dir:length [exampleVis]    1666
-    001/local dir:exec [exampleVis]    true
-    001/local dir:hidden [exampleVis]    false
-    001/local dir:lastmod [exampleVis]    1304945270000
-    001/local dir:length [exampleVis]    272
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:exec [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:hidden [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:lastmod [exampleVis]    1308746481000
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:length [exampleVis]    9192
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:md5 [exampleVis]    274af6419a3c4c4a259260ac7017cbf1
-
-The rows are of the form depth + path, where depth is the number of slashes ("/") in the path, padded to 3 digits. This is so that all the children of a directory appear as consecutive keys in Accumulo; without the depth, you would, for example, see all the subdirectories of /local before you saw /usr.
-For directories the column family is "dir". For files the column family is Long.MAX_VALUE - lastModified, stored in bytes rather than string format so that newer versions sort earlier.
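-
-A sketch of the row-key encoding described above (a hypothetical helper, not a
-class from the example):
-
-    public class DirRow {
-      // "/local/username" contains 2 slashes, so its row is "002/local/username"
-      static String rowFor(String path) {
-        int depth = 0;
-        for (char c : path.toCharArray())
-          if (c == '/')
-            depth++;
-        return String.format("%03d%s", depth, path);
-      }
-    }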
-
-## Index Table
-
-Here is an illustration of what data looks like in the index table:
-
-    row colf:colq [vis]
-    fAccumulo.README i:002/local/Accumulo.README [exampleVis]
-    flocal i:001/local [exampleVis]
-    rEMDAER.olumuccA i:002/local/Accumulo.README [exampleVis]
-    rlacol i:001/local [exampleVis]
-
-The values of the index table are null. The rows are of the form "f" + filename or "r" + reversed file name. This is to enable searches with wildcards at the beginning, middle, or end.
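-
-A sketch of building both index rows for one file name (a hypothetical helper):
-
-    public class IndexRows {
-      static String forwardRow(String filename) {
-        return "f" + filename;
-      }
-
-      // reversing the name lets a leading-wildcard search such as "*jar"
-      // become an ordinary prefix scan on the reversed row
-      static String reverseRow(String filename) {
-        return "r" + new StringBuilder(filename).reverse();
-      }
-    }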
-
-## Data Table
-
-Here is an illustration of what data looks like in the data table:
-
-    row colf:colq [vis]	value
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00filext [exampleVis]    README
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00name [exampleVis]    /local/Accumulo.README
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x00 [exampleVis]    *******************************************************************************\x0A1. Building\x0A\x0AIn the normal tarball release of accumulo, [truncated]
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x01 [exampleVis]
-
-The rows are the md5 hash of the file. Some column family : column qualifier pairs are "refs" : hash of file name + null byte + property name, in which case the value is the property value. There can be multiple references to the same file, distinguished by the hash of the file name.
-Other column family : column qualifier pairs are "~chunk" : chunk size + chunk number (both encoded as bytes), in which case the value is the bytes for that chunk of the file. There is an end-of-file marker whose chunk number is the number of chunks for the file and whose value is empty.
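-
-A small sketch of the "~chunk" qualifier layout described above (assuming
-4-byte big-endian integers, as the example output suggests):
-
-    import java.nio.ByteBuffer;
-    import org.apache.hadoop.io.Text;
-
-    static Text chunkQualifier(int chunkSize, int chunkNumber) {
-        byte[] cq = ByteBuffer.allocate(8).putInt(chunkSize).putInt(chunkNumber).array();
-        return new Text(cq);
-    }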
-
-There may exist multiple copies of the same file (with the same md5 hash) with different chunk sizes or different visibilities. There is an iterator that can be set on the data table that combines these copies into a single copy with a visibility taken from the visibilities of the file references, e.g. (vis from ref1)|(vis from ref2).
diff --git a/1.8/examples/export.html b/1.8/examples/export.html
new file mode 100644
index 0000000..5692a4f
--- /dev/null
+++ b/1.8/examples/export.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/export.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/export.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/export.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/export.html"</script>
+</html>
diff --git a/1.8/examples/export.md b/1.8/examples/export.md
deleted file mode 100644
index b6ea8f8..0000000
--- a/1.8/examples/export.md
+++ /dev/null
@@ -1,91 +0,0 @@
-Title: Apache Accumulo Export/Import Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-Accumulo provides a mechanism to export and import tables. This README shows
-how to use this feature.
-
-The shell session below shows creating a table, inserting data, and exporting
-the table. A table must be offline to export it, and it should remain offline
-for the duration of the distcp. An easy way to take a table offline without
-interrupting access to it is to clone it and take the clone offline.
-
-    root@test15> createtable table1
-    root@test15 table1> insert a cf1 cq1 v1
-    root@test15 table1> insert h cf1 cq1 v2
-    root@test15 table1> insert z cf1 cq1 v3
-    root@test15 table1> insert z cf1 cq2 v4
-    root@test15 table1> addsplits -t table1 b r
-    root@test15 table1> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15> config -t table1 -s table.split.threshold=100M
-    root@test15 table1> clonetable table1 table1_exp
-    root@test15 table1> offline table1_exp
-    root@test15 table1> exporttable -t table1_exp /tmp/table1_export
-    root@test15 table1> quit
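-
-The same clone/offline/export steps can be done with the Java API. A minimal
-sketch, assuming "conn" is an existing Connector:
-
-    import java.util.Collections;
-    import org.apache.accumulo.core.client.Connector;
-
-    void exportClone(Connector conn) throws Exception {
-        conn.tableOperations().clone("table1", "table1_exp", true,
-            Collections.<String,String> emptyMap(), Collections.<String> emptySet());
-        conn.tableOperations().offline("table1_exp", true);  // wait until offline
-        conn.tableOperations().exportTable("table1_exp", "/tmp/table1_export");
-    }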
-
-After executing the export command, a few files are created in the hdfs dir.
-One of the files is a list of files to distcp as shown below.
-
-    $ hadoop fs -ls /tmp/table1_export
-    Found 2 items
-    -rw-r--r--   3 user supergroup        162 2012-07-25 09:56 /tmp/table1_export/distcp.txt
-    -rw-r--r--   3 user supergroup        821 2012-07-25 09:56 /tmp/table1_export/exportMetadata.zip
-    $ hadoop fs -cat /tmp/table1_export/distcp.txt
-    hdfs://n1.example.com:6093/accumulo/tables/3/default_tablet/F0000000.rf
-    hdfs://n1.example.com:6093/tmp/table1_export/exportMetadata.zip
-
-Before the table can be imported, it must be copied using distcp. After the
-distcp completes, the cloned table may be deleted.
-
-    $ hadoop distcp -f /tmp/table1_export/distcp.txt /tmp/table1_export_dest
-
-The Accumulo shell session below shows importing the table and inspecting it.
-The data, splits, config, and logical time information for the table were
-preserved.
-
-    root@test15> importtable table1_copy /tmp/table1_export_dest
-    root@test15> table table1_copy
-    root@test15 table1_copy> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15 table1_copy> getsplits -t table1_copy
-    b
-    r
-    root@test15> config -t table1_copy -f split
-    ---------+--------------------------+-------------------------------------------
-    SCOPE    | NAME                     | VALUE
-    ---------+--------------------------+-------------------------------------------
-    default  | table.split.threshold .. | 1G
-    table    |    @override ........... | 100M
-    ---------+--------------------------+-------------------------------------------
-    root@test15> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    table1_copy          =>         5
-    trace                =>         1
-    root@test15 table1_copy> scan -t accumulo.metadata -b 5 -c srv:time
-    5;b srv:time []    M1343224500467
-    5;r srv:time []    M1343224500467
-    5< srv:time []    M1343224500467
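-
-For completeness, a hedged sketch of the import step via the Java API
-(assuming the distcp above has completed and "conn" is an existing
-Connector):
-
-    import org.apache.accumulo.core.client.Connector;
-
-    void importCopy(Connector conn) throws Exception {
-        conn.tableOperations().importTable("table1_copy", "/tmp/table1_export_dest");
-    }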
-
-
diff --git a/1.8/examples/filedata.html b/1.8/examples/filedata.html
new file mode 100644
index 0000000..d178490
--- /dev/null
+++ b/1.8/examples/filedata.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/filedata.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/filedata.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/filedata.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/filedata.html"</script>
+</html>
diff --git a/1.8/examples/filedata.md b/1.8/examples/filedata.md
deleted file mode 100644
index 26a6c1e..0000000
--- a/1.8/examples/filedata.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Title: Apache Accumulo File System Archive Example (Data Only)
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example archives file data into an Accumulo table. Files with duplicate data are only stored once.
-The example has the following classes:
-
- * CharacterHistogram - A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. An example use of the ChunkInputFormat.
- * ChunkCombiner - An Iterator that dedupes file data and sets their visibilities to a combined visibility based on current references to the file data.
- * ChunkInputFormat - An Accumulo InputFormat that provides keys containing file info (List<Entry<Key,Value>>) and values with an InputStream over the file (ChunkInputStream).
- * ChunkInputStream - An input stream over file data stored in Accumulo.
- * FileDataIngest - Takes a list of files and archives them into Accumulo keyed on hashes of the files.
- * FileDataQuery - Retrieves file data based on the hash of the file. (Used by the dirlist.Viewer.)
- * KeyUtil - A utility for creating and parsing null-byte separated strings into/from Text objects.
- * VisibilityCombiner - A utility for merging visibilities into the form (VIS1)|(VIS2)|...
-
-This example is coupled with the dirlist example. See README.dirlist for instructions.
-
-If you haven't already run the README.dirlist example, ingest a file with FileDataIngest.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --chunk 1000 $ACCUMULO_HOME/README
-
-Open the accumulo shell and look at the data. The row is the MD5 hash of the file, which you can verify by running a command such as 'md5sum' on the file.
-
-    > scan -t dataTable
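-
-Alternatively, a small standard-library Java sketch computes the same MD5
-row key for a file:
-
-    import java.nio.file.Files;
-    import java.nio.file.Paths;
-    import java.security.MessageDigest;
-
-    static String md5Row(String file) throws Exception {
-        byte[] digest = MessageDigest.getInstance("MD5")
-            .digest(Files.readAllBytes(Paths.get(file)));
-        StringBuilder hex = new StringBuilder();
-        for (byte b : digest)
-            hex.append(String.format("%02x", b));
-        return hex.toString();
-    }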
-
-Run the CharacterHistogram MapReduce to add some information about the file.
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
-
-Scan again to see the histogram stored in the 'info' column family.
-
-    > scan -t dataTable
diff --git a/1.8/examples/filter.html b/1.8/examples/filter.html
new file mode 100644
index 0000000..e5ca0ea
--- /dev/null
+++ b/1.8/examples/filter.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/filter.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/filter.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/filter.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/filter.html"</script>
+</html>
diff --git a/1.8/examples/filter.md b/1.8/examples/filter.md
deleted file mode 100644
index e00ba4a..0000000
--- a/1.8/examples/filter.md
+++ /dev/null
@@ -1,110 +0,0 @@
-Title: Apache Accumulo Filter Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This is a simple filter example. It uses the AgeOffFilter that is provided as
-part of the core package org.apache.accumulo.core.iterators.user. Filters are
-iterators that select desired key/value pairs (or weed out undesired ones).
-Filters extend the org.apache.accumulo.core.iterators.Filter class
-and must implement a method accept(Key k, Value v). This method returns true
-if the key/value pair is to be delivered and false if it is to be ignored.
-Filter takes a "negate" parameter which defaults to false. If set to true, the
-return value of the accept method is negated, so that key/value pairs accepted
-by the method are omitted by the Filter.
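-
-As an illustration of the interface (this class is not part of the example
-code), a minimal custom Filter that keeps only entries with non-empty
-values might look like:
-
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.Filter;
-
-    public class NonEmptyValueFilter extends Filter {
-        @Override
-        public boolean accept(Key k, Value v) {
-            // returning false causes the key/value pair to be dropped
-            return v.getSize() > 0;
-        }
-    }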
-
-    username@instance> createtable filtertest
-    username@instance filtertest> setiter -t filtertest -scan -p 10 -n myfilter -ageoff
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> scan
-    username@instance filtertest> insert foo a b c
-    username@instance filtertest> scan
-    foo a:b []    c
-    username@instance filtertest>
-
-... wait 30 seconds ...
-
-    username@instance filtertest> scan
-    username@instance filtertest>
-
-Note the absence of the entry inserted more than 30 seconds ago. Since the
-scope was set to "scan", this means the entry is still in Accumulo, but is
-being filtered out at query time. To delete entries from Accumulo based on
-the ages of their timestamps, AgeOffFilters should be set up for the "minc"
-and "majc" scopes, as well.
-
-To force an ageoff of the persisted data, after setting up the ageoff iterator
-on the "minc" and "majc" scopes you can flush and compact your table. This will
-happen automatically as a background operation on any table that is being
-actively written to, but can also be requested in the shell.
-
-The first setiter command used the special -ageoff flag to specify the
-AgeOffFilter, but any Filter can be configured by using the -class flag. The
-following commands show how to enable the AgeOffFilter for the minc and majc
-scopes using the -class flag, then flush and compact the table.
-
-    username@instance filtertest> setiter -t filtertest -minc -majc -p 10 -n myfilter -class org.apache.accumulo.core.iterators.user.AgeOffFilter
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> flush
-    06 10:42:24,806 [shell.Shell] INFO : Flush of table filtertest initiated...
-    username@instance filtertest> compact
-    06 10:42:36,781 [shell.Shell] INFO : Compaction of table filtertest started for given range
-    username@instance filtertest> flush -t filtertest -w
-    06 10:42:52,881 [shell.Shell] INFO : Flush of table filtertest completed.
-    username@instance filtertest> compact -t filtertest -w
-    06 10:43:00,632 [shell.Shell] INFO : Compacting table ...
-    06 10:43:01,307 [shell.Shell] INFO : Compaction of table filtertest completed for given range
-    username@instance filtertest>
-
-By default, flush and compact execute in the background, but with the -w flag
-they will wait to return until the operation has completed. Both are
-demonstrated above, though only one call to each would be necessary. A
-specific table can be specified with -t.
-
-After the compaction runs, the newly created files will not contain any data
-that should have been aged off, and the Accumulo garbage collector will remove
-the old files.
-
-To see the iterator settings for a table, use config.
-
-    username@instance filtertest> config -t filtertest -f iterator
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    SCOPE    | NAME                                        | VALUE
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    table    | table.iterator.majc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.majc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.majc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.majc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.minc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.minc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.minc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.minc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.scan.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.scan.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.scan.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.scan.vers.opt.maxVersions .. | 1
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    username@instance filtertest>
-
-When setting new iterators, make sure to order their priority numbers
-(specified with -p) in the order you would like the iterators to be applied.
-Also, each iterator must have a unique name and priority within each scope.
diff --git a/1.8/examples/helloworld.html b/1.8/examples/helloworld.html
new file mode 100644
index 0000000..277fab9
--- /dev/null
+++ b/1.8/examples/helloworld.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/helloworld.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/helloworld.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/helloworld.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/helloworld.html"</script>
+</html>
diff --git a/1.8/examples/helloworld.md b/1.8/examples/helloworld.md
deleted file mode 100644
index 618e301..0000000
--- a/1.8/examples/helloworld.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Title: Apache Accumulo Hello World Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.helloworld in the examples-simple module:
-
- * InsertWithBatchWriter.java - Inserts 10K rows (50K entries) into accumulo with each row having 5 entries
- * ReadData.java - Reads all data between two rows
-
-Log into the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-
-Create a table called 'hellotable':
-
-    username@instance> createtable hellotable
-
-Launch a Java program that inserts data with a BatchWriter:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter -i instance -z zookeepers -u username -p password -t hellotable
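-
-The core of the insert logic looks roughly like this (a hedged sketch, not
-the class's exact source; "conn" is assumed to be an existing Connector):
-
-    import org.apache.accumulo.core.client.BatchWriter;
-    import org.apache.accumulo.core.client.BatchWriterConfig;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Value;
-
-    void insertRows(Connector conn) throws Exception {
-        BatchWriter bw = conn.createBatchWriter("hellotable", new BatchWriterConfig());
-        for (int i = 0; i < 10000; i++) {           // 10K rows
-            Mutation m = new Mutation(String.format("row_%04d", i));
-            for (int j = 0; j < 5; j++)              // 5 entries per row
-                m.put("colf", "colq_" + j, new Value("value".getBytes()));
-            bw.addMutation(m);
-        }
-        bw.close();
-    }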
-
-On the accumulo status page at the URL below (where 'master' is replaced with the name or IP of your accumulo master), you should see 50K entries.
-
-    http://master:9995/
-
-To view the entries, use the shell to scan the table:
-
-    username@instance> table hellotable
-    username@instance hellotable> scan
-
-You can also use a Java class to scan the table:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData -i instance -z zookeepers -u username -p password -t hellotable --startKey row_0 --endKey row_1001
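-
-The equivalent scan in Java is only a few lines (a hedged sketch; "conn" is
-assumed to be an existing Connector):
-
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    void readData(Connector conn) throws Exception {
-        Scanner scanner = conn.createScanner("hellotable", Authorizations.EMPTY);
-        scanner.setRange(new Range("row_0", "row_1001"));
-        for (Entry<Key,Value> entry : scanner)
-            System.out.println(entry.getKey() + " -> " + entry.getValue());
-    }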
diff --git a/1.8/examples/index.html b/1.8/examples/index.html
index 3af64b6..22a4d58 100644
--- a/1.8/examples/index.html
+++ b/1.8/examples/index.html
@@ -204,6 +204,10 @@ features of Apache Accumulo.</p>
 <p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
                        expressions.</p>
 
+<p><a href="reservations">reservations</a>: Running a reservation system with Conditional Mutations.</p>
+
+<p><a href="rgbalancer">rgbalancer</a>: Spreading out groups of tablets with a Regex Group Balancer.</p>
+
 <p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
                        column in the same table.</p>
 
diff --git a/1.8/examples/isolation.html b/1.8/examples/isolation.html
new file mode 100644
index 0000000..3f37e44
--- /dev/null
+++ b/1.8/examples/isolation.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/isolation.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/isolation.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/isolation.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/isolation.html"</script>
+</html>
diff --git a/1.8/examples/isolation.md b/1.8/examples/isolation.md
deleted file mode 100644
index 4739f59..0000000
--- a/1.8/examples/isolation.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Title: Apache Accumulo Isolation Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-Accumulo has an isolated scanner that ensures partial changes to rows are not
-seen. Isolation is documented in ../docs/isolation.html and the user manual.
-
-InterferenceTest is a simple example that shows the effects of scanning with
-and without isolation. This program starts two threads. One thread
-continually updates all of the values in a row to be the same thing, but
-different from what they used to be. The other thread continually scans the
-table and checks that all values in a row are the same. Without isolation the
-scanning thread will sometimes see different values, which is the result of
-reading the row at the same time a mutation is changing the row.
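-
-On the client side, enabling isolation is just a wrapper around a normal
-scanner. A minimal sketch, assuming "conn" is an existing Connector:
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IsolatedScanner;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    Scanner makeIsolatedScanner(Connector conn) throws Exception {
-        Scanner scanner = conn.createScanner("isotest", Authorizations.EMPTY);
-        return new IsolatedScanner(scanner);  // rows are now read atomically
-    }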
-
-Below, InterferenceTest is run without isolation enabled for 5000 iterations
-and it reports problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000
-    ERROR Columns in row 053 had multiple values [53, 4553]
-    ERROR Columns in row 061 had multiple values [561, 61]
-    ERROR Columns in row 070 had multiple values [570, 1070]
-    ERROR Columns in row 079 had multiple values [1079, 1579]
-    ERROR Columns in row 088 had multiple values [2588, 1588]
-    ERROR Columns in row 106 had multiple values [2606, 3106]
-    ERROR Columns in row 115 had multiple values [4615, 3115]
-    finished
-
-Below, InterferenceTest is run with isolation enabled for 5000 iterations and
-it reports no problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000 --isolated
-    finished
-
-
diff --git a/1.8/examples/mapred.html b/1.8/examples/mapred.html
new file mode 100644
index 0000000..01e4fa6
--- /dev/null
+++ b/1.8/examples/mapred.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/mapred.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/mapred.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/mapred.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/mapred.html"</script>
+</html>
diff --git a/1.8/examples/mapred.md b/1.8/examples/mapred.md
deleted file mode 100644
index 9e9b17f..0000000
--- a/1.8/examples/mapred.md
+++ /dev/null
@@ -1,154 +0,0 @@
-Title: Apache Accumulo MapReduce Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses mapreduce and accumulo to compute word counts for a set of
-documents. This is accomplished using a map-only mapreduce job and an
-accumulo table with combiners.
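-
-The heart of the job is a mapper that emits one Mutation per word; the
-SummingCombiner configured below then adds the ones together. This is a
-hedged sketch of the approach, not WordCount's exact source, and the column
-qualifier is illustrative:
-
-    import java.io.IOException;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.hadoop.io.LongWritable;
-    import org.apache.hadoop.io.Text;
-    import org.apache.hadoop.mapreduce.Mapper;
-
-    public class WordCountMapper extends Mapper<LongWritable,Text,Text,Mutation> {
-        @Override
-        public void map(LongWritable key, Text line, Context context)
-                throws IOException, InterruptedException {
-            for (String word : line.toString().split("\\s+")) {
-                if (word.isEmpty())
-                    continue;
-                Mutation m = new Mutation(word);
-                m.put("count", "20080906", new Value("1".getBytes()));
-                context.write(null, m);  // null routes to the default output table
-            }
-        }
-    }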
-
-To run this example you will need a directory in HDFS containing text files.
-The accumulo readme will be used to show how to run this example.
-
-    $ hadoop fs -copyFromLocal $ACCUMULO_HOME/README /user/username/wc/Accumulo.README
-    $ hadoop fs -ls /user/username/wc
-    Found 1 items
-    -rw-r--r--   2 username supergroup       9359 2009-07-15 17:54 /user/username/wc/Accumulo.README
-
-The first part of running this example is to create a table with a combiner
-for the column family count.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable wordCount
-    username@instance wordCount> setiter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -t wordCount -majc -minc -scan
-    SummingCombiner interprets Values as Longs and adds them together. A variety of encodings (variable length, fixed length, or string) are available
-    ----------> set SummingCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.: false
-    ----------> set SummingCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non-alphanum chars using %<hex>.: count
-    ----------> set SummingCombiner parameter lossy, if true, failed decodes are ignored. Otherwise combiner will error on failed decodes (default false): <TRUE|FALSE>: false
-    ----------> set SummingCombiner parameter type, <VARLEN|FIXEDLEN|STRING|fullClassName>: STRING
-    username@instance wordCount> quit
-
-After creating the table, run the word count map reduce job.
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
-
-    11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
-    11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
-    11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
-    11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
-    11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
-    11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
-    11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters
-    11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
-    11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
-    11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
-    11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
-
-After the map reduce job completes, query the accumulo table to see word
-counts.
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> table wordCount
-    username@instance wordCount> scan -b the
-    the count:20080906 []    75
-    their count:20080906 []    2
-    them count:20080906 []    1
-    then count:20080906 []    1
-    there count:20080906 []    1
-    these count:20080906 []    3
-    this count:20080906 []    6
-    through count:20080906 []    1
-    time count:20080906 []    3
-    time. count:20080906 []    1
-    to count:20080906 []    27
-    total count:20080906 []    1
-    tserver, count:20080906 []    1
-    tserver.compaction.major.concurrent.max count:20080906 []    1
-    ...
-
-Another example to look at is
-org.apache.accumulo.examples.simple.mapreduce.UniqueColumns. This example
-computes the unique set of columns in a table and shows how a map reduce job
-can directly read a table's files from HDFS.
-
-One more example available is
-org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount.
-The TokenFileWordCount example works exactly the same as the WordCount example
-explained above except that it uses a token file rather than giving the
-password directly to the map-reduce job (this avoids having the password
-displayed in the job's configuration, which is world-readable).
-
-To create a token file, use the create-token utility:
-
-    $ ./bin/accumulo create-token
-
-It defaults to creating a PasswordToken, but you can specify the token class
-with -tc (requires the fully qualified class name). Based on the token class,
-it will prompt you for each property required to create the token.
-
-The last value it prompts for is a local filename to save to. If this file
-exists, it will append the new token to the end. Multiple tokens can exist in
-a file, but only the first one for each user will be recognized.
-
-Rather than waiting for the prompts, you can specify some options when calling
-create-token, for example
-
-    $ ./bin/accumulo create-token -u root -p secret -f root.pw
-
-would create a token file containing a PasswordToken for
-user 'root' with password 'secret' and save it to 'root.pw'.
-
-This local file needs to be uploaded to hdfs to be used with the
-map-reduce job. For example, if the file were 'root.pw' in the local directory:
-
-    $ hadoop fs -put root.pw root.pw
-
-This would put 'root.pw' in the user's home directory in hdfs.
-
-Because the basic WordCount example uses Opts (which extends
-ClientOnRequiredTable) to parse its arguments, you can use a token file
-with it by running the same command as explained above, replacing the
-password option with the token file option (use -tf rather than -p).
-
-    $ ./bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -tf tokenfile
-
-In the above examples, username was 'root' and tokenfile was 'root.pw'.
-
-However, if you don't want to use the Opts class to parse arguments,
-TokenFileWordCount is an example of using the token file manually.
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
-
-The results should be the same as the WordCount example except that the
-authentication token was not stored in the configuration. It was instead
-stored in a file that the map-reduce job pulled into the distributed cache.
-(If you ran either of these on the same table right after the
-WordCount example, then the resulting counts should just double.)
-
-
-
-
diff --git a/1.8/examples/maxmutation.html b/1.8/examples/maxmutation.html
new file mode 100644
index 0000000..5c9b10c
--- /dev/null
+++ b/1.8/examples/maxmutation.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/maxmutation.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/maxmutation.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/maxmutation.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/maxmutation.html"</script>
+</html>
diff --git a/1.8/examples/maxmutation.md b/1.8/examples/maxmutation.md
deleted file mode 100644
index 45b80d4..0000000
--- a/1.8/examples/maxmutation.md
+++ /dev/null
@@ -1,49 +0,0 @@
-Title: Apache Accumulo MaxMutation Constraints Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This is an example of how to limit the size of mutations that will be accepted into
-a table. Under the default configuration, accumulo does not limit the size of
-mutations that can be ingested. Poorly behaved writers might inadvertently
-create mutations so large that they cause the tablet servers to run out of
-memory. A simple constraint can be added to a table to reject very large
-mutations.
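-
-For reference, a constraint is just a class implementing the Constraint
-interface. Below is a hedged sketch of a size-limiting constraint (the
-shipped MaxMutationSize derives its limit from the tserver memory rather
-than a fixed constant):
-
-    import java.util.Collections;
-    import java.util.List;
-    import org.apache.accumulo.core.constraints.Constraint;
-    import org.apache.accumulo.core.data.Mutation;
-
-    public class SmallMutationsOnly implements Constraint {
-        private static final long MAX_BYTES = 1 << 20;  // 1 MB, illustrative
-
-        @Override
-        public String getViolationDescription(short violationCode) {
-            return "mutation exceeded maximum size of " + MAX_BYTES;
-        }
-
-        @Override
-        public List<Short> check(Environment env, Mutation mutation) {
-            if (mutation.numBytes() > MAX_BYTES)
-                return Collections.singletonList((short) 0);
-            return null;  // null means no violations
-        }
-    }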
-
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable test_ingest
-    username@instance test_ingest> config -t test_ingest -s table.constraint.1=org.apache.accumulo.examples.simple.constraints.MaxMutationSize
-    username@instance test_ingest>
-
-
-Now the table will reject any mutation that is larger than 1/256th of the 
-working memory of the tablet server.  The following command attempts to ingest 
-a single row with 10000 columns, which exceeds the memory limit. Depending on the
-amount of Java heap your tserver(s) are given, you may have to increase the number
-of columns provided to see the failure.
-
-    $ ./bin/accumulo org.apache.accumulo.test.TestIngest -i instance -z zookeepers -u username -p password --rows 1 --cols 10000 
-    ERROR : Constraint violates : ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.MaxMutationSize, violationCode:0, violationDescription:mutation exceeded maximum size of 188160, numberOfViolatingMutations:1)
-
diff --git a/1.8/examples/regex.html b/1.8/examples/regex.html
new file mode 100644
index 0000000..ac374c1
--- /dev/null
+++ b/1.8/examples/regex.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/regex.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/regex.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/regex.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/regex.html"</script>
+</html>
diff --git a/1.8/examples/regex.md b/1.8/examples/regex.md
deleted file mode 100644
index ea9f208..0000000
--- a/1.8/examples/regex.md
+++ /dev/null
@@ -1,57 +0,0 @@
-Title: Apache Accumulo Regex Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses mapreduce and accumulo to find items using regular expressions.
-This is accomplished using a map-only mapreduce job and a scan-time iterator.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dogrow dogcf dogcq dogvalue
-    username@instance> insert catrow catcf catcq catvalue
-    username@instance> quit
-
-The RegexExample class sets an iterator on the scanner. This does pattern matching
-against each key/value in accumulo, and only returns matching items. It will do this
-in parallel and will store the results in files in hdfs.
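-
-The same server-side matching can be enabled on a plain Scanner (a hedged
-sketch; "conn" is assumed to be an existing Connector):
-
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.client.IteratorSetting;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.iterators.user.RegExFilter;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    Scanner regexScanner(Connector conn) throws Exception {
-        Scanner scanner = conn.createScanner("input", Authorizations.EMPTY);
-        IteratorSetting setting = new IteratorSetting(30, "rowRegex", RegExFilter.class);
-        RegExFilter.setRegexs(setting, "dog.*", null, null, null, false);  // row regex only
-        scanner.addScanIterator(setting);
-        return scanner;
-    }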
-
-The following will search for any rows in the input table that start with "dog":
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    Found 3 items
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:11 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:10 /tmp/output/_logs
-    -rw-r--r--   1 username supergroup         51 2013-01-10 14:10 /tmp/output/part-m-00000
-
-We can see the output of our little map-reduce job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    dogrow dogcf:dogcq [] 1357844987994 false	dogvalue
-
-
diff --git a/1.8/examples/reservations.html b/1.8/examples/reservations.html
new file mode 100644
index 0000000..a91f4ad
--- /dev/null
+++ b/1.8/examples/reservations.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/reservations.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/reservations.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/reservations.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/reservations.html"</script>
+</html>
diff --git a/1.8/examples/reservations.md b/1.8/examples/reservations.md
deleted file mode 100644
index ff111b4..0000000
--- a/1.8/examples/reservations.md
+++ /dev/null
@@ -1,66 +0,0 @@
-Title: Apache Accumulo Reservations Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows running a simple reservation system implemented using
-conditional mutations. This system guarantees that only one concurrent user can
-reserve a resource. The example's reserve command allows multiple users to be
-specified. When this is done, it creates a separate reservation thread for each
-user. In the example below threads are spun up for alice, bob, eve, mallory,
-and trent to reserve room06 on 20140101. Bob ends up getting the reservation
-and everyone else is put on a wait list. The example code will take any string
-for what, when and who.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.reservations.ARS
-    >connect test16 localhost root secret ars
-      connected
-    >
-      Commands :
-        reserve <what> <when> <who> {who}
-        cancel <what> <when> <who>
-        list <what> <when>
-    >reserve room06 20140101 alice bob eve mallory trent
-                       bob : RESERVED
-                   mallory : WAIT_LISTED
-                     alice : WAIT_LISTED
-                     trent : WAIT_LISTED
-                       eve : WAIT_LISTED
-    >list room06 20140101
-      Reservation holder : bob
-      Wait list : [mallory, alice, trent, eve]
-    >cancel room06 20140101 alice
-    >cancel room06 20140101 bob
-    >list room06 20140101
-      Reservation holder : mallory
-      Wait list : [trent, eve]
-    >quit
-
-Scanning the table in the Accumulo shell after running the example shows the
-following:
-
-    root@test16> table ars
-    root@test16 ars> scan
-    room06:20140101 res:0001 []    mallory
-    room06:20140101 res:0003 []    trent
-    room06:20140101 res:0004 []    eve
-    room06:20140101 tx:seq []    6
-
-The tx:seq column is incremented for each update to the row allowing for
-detection of concurrent changes. For an update to go through, the sequence
-number must not have changed since the data was read. If it does change,
-the conditional mutation will fail and the example code will retry.
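-
-In Java, one such check-and-set update looks roughly like the sketch below
-("conn" is assumed to be an existing Connector; the row and sequence values
-are illustrative):
-
-    import org.apache.accumulo.core.client.ConditionalWriter;
-    import org.apache.accumulo.core.client.ConditionalWriterConfig;
-    import org.apache.accumulo.core.client.Connector;
-    import org.apache.accumulo.core.data.Condition;
-    import org.apache.accumulo.core.data.ConditionalMutation;
-    import org.apache.accumulo.core.data.Value;
-
-    ConditionalWriter.Status reserve(Connector conn) throws Exception {
-        ConditionalWriter writer =
-            conn.createConditionalWriter("ars", new ConditionalWriterConfig());
-        ConditionalMutation cm = new ConditionalMutation("room06:20140101");
-        cm.addCondition(new Condition("tx", "seq").setValue("6"));  // expected sequence
-        cm.put("tx", "seq", new Value("7".getBytes()));             // bump the sequence
-        cm.put("res", "0005", new Value("carol".getBytes()));       // new wait list entry
-        // ACCEPTED means the update won; REJECTED means re-read the row and retry
-        return writer.write(cm).getStatus();
-    }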
-
diff --git a/1.8/examples/rgbalancer.html b/1.8/examples/rgbalancer.html
new file mode 100644
index 0000000..645c841
--- /dev/null
+++ b/1.8/examples/rgbalancer.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/rgbalancer.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/rgbalancer.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/rgbalancer.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/rgbalancer.html"</script>
+</html>
diff --git a/1.8/examples/rgbalancer.md b/1.8/examples/rgbalancer.md
deleted file mode 100644
index f192a93..0000000
--- a/1.8/examples/rgbalancer.md
+++ /dev/null
@@ -1,159 +0,0 @@
-Title: Apache Accumulo RegexGroupBalancer Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-For some data access patterns, it's important to spread groups of tablets within
-a table out evenly.  Accumulo has a balancer that can do this using a regular
-expression to group tablets. This example shows how this balancer spreads 4
-groups of tablets within a table evenly across 17 tablet servers.
-
-The session below shows creating a table and adding splits.  For this example we would like
-all of the tablets where the split point has the same two digits to be on
-different tservers.  This gives us four groups of tablets: 01, 02, 03, and 04.
-
-    root@accumulo> createtable testRGB
-    root@accumulo testRGB> addsplits -t testRGB 01b 01m 01r 01z  02b 02m 02r 02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 04l 04m 04n 04o 04p
-    root@accumulo testRGB> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    testRGB              =>         2
-    trace                =>         1
-
-After adding the splits we look at the locations in the metadata table.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03m loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04l loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Below, the information above has been rearranged to show which tablet groups are on
-each tserver.  The four tablets in group 03 are on only two tservers; ideally those
-tablets would be spread across 4 tservers.  Note the default tablet (2<) was
-categorized as group 04 below.
-
-    ip-10-1-2-13:9997 01
-    ip-10-1-2-14:9997 04
-    ip-10-1-2-15:9997 01
-    ip-10-1-2-16:9997 04 04
-    ip-10-1-2-17:9997 04 04
-    ip-10-1-2-18:9997 04
-    ip-10-1-2-19:9997 04 04
-    ip-10-1-2-20:9997 03 03
-    ip-10-1-2-21:9997 03 03
-    ip-10-1-2-22:9997 04 04
-    ip-10-1-2-23:9997 04 04
-    ip-10-1-2-24:9997 04 04
-    ip-10-1-2-25:9997 01 01
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 02
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 04
-
-To remedy this situation, the RegexGroupBalancer is configured with the
-commands below.  The configured regular expression selects the first two digits
-from a tablet's end row as the group id.  Tablets that don't match and the
-default tablet are configured to be in group 04.
-
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.pattern=(\\d\\d).*
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.default=04
-    root@accumulo testRGB> config -t testRGB -s table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
-
-After waiting a little bit, look at the tablet locations again and all is good.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;03m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;04l loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Once again, the data above is transformed to make it easier to see which groups
-are on tservers.  The transformed data below shows that all groups are now
-evenly spread.
-
-    ip-10-1-2-13:9997 01 04
-    ip-10-1-2-14:9997    04
-    ip-10-1-2-15:9997 01 04
-    ip-10-1-2-16:9997    04
-    ip-10-1-2-17:9997    04
-    ip-10-1-2-18:9997 01 04
-    ip-10-1-2-19:9997 02 04
-    ip-10-1-2-20:9997 03 04
-    ip-10-1-2-21:9997 03 04
-    ip-10-1-2-22:9997    04
-    ip-10-1-2-23:9997 03 04
-    ip-10-1-2-24:9997    04
-    ip-10-1-2-25:9997 01 04
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 04
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 03 04
-
-If you need this functionality but a regular expression does not meet your
-needs, then extend GroupBalancer.  This allows you to specify a partitioning
-function in Java.  Use the RegexGroupBalancer source as an example.
diff --git a/1.8/examples/rowhash.html b/1.8/examples/rowhash.html
new file mode 100644
index 0000000..cb8d3b1
--- /dev/null
+++ b/1.8/examples/rowhash.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/rowhash.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/rowhash.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/rowhash.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/rowhash.html"</script>
+</html>
diff --git a/1.8/examples/rowhash.md b/1.8/examples/rowhash.md
deleted file mode 100644
index 43782c9..0000000
--- a/1.8/examples/rowhash.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Title: Apache Accumulo RowHash Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows a simple map/reduce job that reads from an accumulo table and
-writes back into that table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert a-row cf cq value
-    username@instance> insert b-row cf cq value
-    username@instance> quit
-
-The RowHash class will insert a hash for each row in the database if it
-contains a specified column. Here's how to run the map/reduce job:
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
-
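-Internally, the job computes an MD5 digest of each matching value and writes
-it back to a sibling column.  The snippet below is a rough sketch of that
-per-entry step, not the actual RowHash source; the column names mirror the
-scan output shown next (java.util.Base64 requires Java 8).
-
-    import java.security.MessageDigest;
-    import java.util.Base64;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Value;
-
-    // build a mutation holding the MD5/Base64 hash of one entry's value
-    static Mutation hashEntry(String row, Value value) throws Exception {
-      byte[] digest = MessageDigest.getInstance("MD5").digest(value.get());
-      String hash = Base64.getEncoder().encodeToString(digest);
-      Mutation m = new Mutation(row);
-      m.put("cf-HASHTYPE", "cq-MD5BASE64", hash);
-      return m;
-    }
-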
-Now we can scan the table and see the hashes:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> scan -t input
-    a-row cf:cq []    value
-    a-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    b-row cf:cq []    value
-    b-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    username@instance>
-
diff --git a/1.8/examples/sample.html b/1.8/examples/sample.html
new file mode 100644
index 0000000..92ed6a1
--- /dev/null
+++ b/1.8/examples/sample.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/sample.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/sample.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/sample.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/sample.html"</script>
+</html>
diff --git a/1.8/examples/sample.md b/1.8/examples/sample.md
deleted file mode 100644
index 3642cc6..0000000
--- a/1.8/examples/sample.md
+++ /dev/null
@@ -1,192 +0,0 @@
-Title: Apache Accumulo Batch Writing and Scanning Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-Basic Sampling Example
-----------------------
-
-Accumulo supports building a set of sample data that can be efficiently
-accessed by scanners.  What data is included in the sample set is configurable.
-Below, some data representing documents is inserted.
-
-    root@instance sampex> createtable sampex
-    root@instance sampex> insert 9255 doc content 'abcde'
-    root@instance sampex> insert 9255 doc url file://foo.txt
-    root@instance sampex> insert 8934 doc content 'accumulo scales'
-    root@instance sampex> insert 8934 doc url file://accumulo_notes.txt
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano'
-    root@instance sampex> insert 2317 doc url file://groceries/9.txt
-    root@instance sampex> insert 3900 doc content 'EC2 ate my homework'
-    root@instance sampex> insert 3900 doc url file://final_project.txt
-
-Below, the table sampex is configured to build a sample set.  The
-configuration causes Accumulo to include any row where
-`murmur3_32(row) % 3 == 0` in the table's sample data.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.hasher=murmur3_32
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=3
-    root@instance sampex> config -t sampex -s table.sampler=org.apache.accumulo.core.client.sample.RowSampler
-
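-The same sampler can be configured through the Java API.  The sketch below is
-an illustration, assuming a Connector named conn has already been created.
-
-    import org.apache.accumulo.core.client.sample.RowSampler;
-    import org.apache.accumulo.core.client.sample.SamplerConfiguration;
-
-    SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName());
-    sc.addOption("hasher", "murmur3_32");
-    sc.addOption("modulus", "3");
-    conn.tableOperations().setSamplerConfiguration("sampex", sc);
-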
-Below, attempting to scan the sample returns an error.  This is because data
-was inserted before the sample set was configured.
-
-    root@instance sampex> scan --sample
-    2015-09-09 12:21:50,643 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-
-To remedy this problem, the following command will flush in-memory data and
-compact any files that do not contain the correct sample data.
-
-    root@instance sampex> compact -t sampex --sf-no-sample
-
-After the compaction, the sample scan works.  
-
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-
-The commands below show that updates to data in the sample are seen when
-scanning the sample.
-
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano, butter'
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano, butter
-    2317 doc:url []    file://groceries/9.txt
-
-In order to make scanning the sample fast, sample data is partitioned as data
-is written to Accumulo.  This means that if the sample configuration is
-changed, data written previously is partitioned using different criteria.
-Accumulo will detect this situation and fail sample scans.  The commands below
-show this failure and fixing the problem with a compaction.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=2
-    root@instance sampex> scan --sample
-    2015-09-09 12:22:51,058 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-    root@instance sampex> compact -t sampex --sf-no-sample
-    2015-09-09 12:23:07,242 [shell.Shell] INFO : Compaction of table sampex started for given range
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-    3900 doc:content []    EC2 ate my homework
-    3900 doc:url []    file://final_project.txt
-    9255 doc:content []    abcde
-    9255 doc:url []    file://foo.txt
-
-The example above is replicated in a Java program using the Accumulo API.
-Below is the program name and the command to run it.
-
-    ./bin/accumulo org.apache.accumulo.examples.simple.sample.SampleExample -i instance -z localhost -u root -p secret
-
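-Scanning the sample from Java only requires setting the sampler configuration
-on the scanner.  The sketch below is an illustration, reusing the Connector
-conn and SamplerConfiguration sc from the sketch above.
-
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    Scanner scanner = conn.createScanner("sampex", Authorizations.EMPTY);
-    // scan only the sample; iteration fails with SampleNotPresentException
-    // if the sample is not configured or built
-    scanner.setSamplerConfiguration(sc);
-    for (Entry<Key,Value> e : scanner)
-      System.out.println(e.getKey() + " -> " + e.getValue());
-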
-The commands below look under the hood to give some insight into how this
-feature works.  They determine what files the sampex table is using.
-
-    root@instance sampex> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    sampex               =>         2
-    trace                =>         1
-    root@instance sampex> scan -t accumulo.metadata -c file -b 2 -e 2<
-    2< file:hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf []    702,8
-
-The output below shows running `accumulo rfile-info` on the file above.  It
-shows the rfile has a normal default locality group and a sample default
-locality group.  The output also shows the configuration used to create the
-sample locality group.  The sample configuration within an rfile must match
-the table's sample configuration for sample scans to work.
-
-    $ ./bin/accumulo rfile-info hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    Reading file: hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    RFile Version            : 8
-    
-    Locality group           : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 35 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 8
-    	Column families        : [doc]
-    
-    Sample Configuration     :
-    	Sampler class          : org.apache.accumulo.core.client.sample.RowSampler
-    	Sampler options        : {hasher=murmur3_32, modulus=2}
-
-    Sample Locality group    : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 36 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 6
-    	Column families        : [doc]
-    
-    Meta block     : BCFile.index
-          Raw size             : 4 bytes
-          Compressed size      : 12 bytes
-          Compression type     : gz
-
-    Meta block     : RFile.index
-          Raw size             : 309 bytes
-          Compressed size      : 176 bytes
-          Compression type     : gz
-
-
-Shard Sampling Example
--------------------------
-
-`README.shard` shows how to index and search files using Accumulo.  That
-example indexes documents into a table named `shard`.  The indexing scheme used
-in that example places the document name in the column qualifier.  A useful
-sample of this indexing scheme should contain all data for any document in the
-sample.  To accomplish this, the following commands build a sample for the
-shard table based on the column qualifier.
-
-    root@instance shard> config -t shard -s table.sampler.opt.hasher=murmur3_32
-    root@instance shard> config -t shard -s table.sampler.opt.modulus=101
-    root@instance shard> config -t shard -s table.sampler.opt.qualifier=true
-    root@instance shard> config -t shard -s table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
-    root@instance shard> compact -t shard --sf-no-sample -w
-    2015-07-23 15:00:09,280 [shell.Shell] INFO : Compacting table ...
-    2015-07-23 15:00:10,134 [shell.Shell] INFO : Compaction of table shard completed for given range
-
-After enabling sampling, the command below counts the number of documents in
-the sample containing the words `import` and `int`.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sample -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-         11      11    1246
-
-The command below counts the total number of documents containing the words
-`import` and `int`.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-       1085    1085  118175
-
-The counts of 11 out of 1085 total are about what would be expected for a
-modulus of 101, since 1085/101 is roughly 10.7.  Querying the sample first
-provides a quick way to estimate how much data the real query will bring back.
-
-Another way sample data could be used with the shard example is with a
-specialized iterator.  In the examples source code there is an iterator named
-CutoffIntersectingIterator.  This iterator first checks how many documents are
-found in the sample data.  If too many documents are found in the sample data,
-then it returns nothing.  Otherwise, it proceeds to query the full data set.
-To experiment with this iterator, use the following command.  The
-`--sampleCutoff` option below will cause the query to return nothing if, based
-on the sample, it appears a query would return more than 1000 documents.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sampleCutoff 1000 -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
diff --git a/1.8/examples/shard.html b/1.8/examples/shard.html
new file mode 100644
index 0000000..ce8ccba
--- /dev/null
+++ b/1.8/examples/shard.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/shard.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/shard.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/shard.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/shard.html"</script>
+</html>
diff --git a/1.8/examples/shard.md b/1.8/examples/shard.md
deleted file mode 100644
index d08658a..0000000
--- a/1.8/examples/shard.md
+++ /dev/null
@@ -1,67 +0,0 @@
-Title: Apache Accumulo Shard Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-Accumulo has an iterator called the intersecting iterator, which supports querying a term index that is partitioned by
-document, or "sharded". This example shows how to use the intersecting iterator through these four programs:
-
- * Index.java - Indexes a set of text files into an Accumulo table
- * Query.java - Finds documents containing a given set of terms.
- * Reverse.java - Reads the index table and writes a map of documents to terms into another table.
- * ContinuousQuery.java - Uses the table populated by Reverse.java to select N random terms per document. Then it continuously and randomly queries those terms.
-
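-At the core of Query.java, an IntersectingIterator is attached to a
-BatchScanner; matching document ids come back in the column qualifier.  The
-sketch below illustrates that pattern, assuming a Connector named conn has
-already been created.
-
-    import java.util.Collections;
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.BatchScanner;
-    import org.apache.accumulo.core.client.IteratorSetting;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.iterators.user.IntersectingIterator;
-    import org.apache.accumulo.core.security.Authorizations;
-    import org.apache.hadoop.io.Text;
-
-    Text[] terms = {new Text("foo"), new Text("bar")};
-    IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
-    IntersectingIterator.setColumnFamilies(ii, terms);
-    BatchScanner bs = conn.createBatchScanner("shard", Authorizations.EMPTY, 10);
-    bs.setRanges(Collections.singleton(new Range()));
-    bs.addScanIterator(ii);
-    for (Entry<Key,Value> e : bs)
-      System.out.println(e.getKey().getColumnQualifier());  // document id
-    bs.close();
-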
-To run these example programs, create two tables like below.
-
-    username@instance> createtable shard
-    username@instance shard> createtable doc2term
-
-After creating the tables, index some files. The following command indexes all of the Java files in the Accumulo source code.
-
-    $ cd /local/username/workspace/accumulo/
-    $ find core/src server/src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i instance -z zookeepers -t shard -u username -p password --partitions 30
-
-The following command queries the index to find all files containing 'foo' and 'bar'.
-
-    $ cd $ACCUMULO_HOME
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance -z zookeepers -t shard -u username -p password foo bar
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/RowDeleteTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/logger/TestLogWriter.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/DeleteEverythingTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/data/KeyExtentTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/constraints/MetadataConstraintsTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/iterators/WholeRowIteratorTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/util/DefaultMapTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/tabletserver/InMemoryMapTest.java
-
-In order to run ContinuousQuery, we need to run Reverse.java to populate doc2term.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password
-
-Below, ContinuousQuery is run using 5 terms. It selects 5 random terms from each document, then it continually
-selects one set of 5 terms at random and queries for them. It prints the number of matching documents and the query time in seconds.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password --terms 5
-    [public, core, class, binarycomparable, b] 2  0.081
-    [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
-    [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, columnvisibility] 1  0.049
-    [getpackage, testversion, util, version, 55] 1  0.048
-    [for, static, println, public, the] 55  0.211
-    [sleeptime, wrappingiterator, options, long, utilwaitthread] 1  0.057
-    [string, public, long, 0, wait] 12  0.132
diff --git a/1.8/examples/tabletofile.html b/1.8/examples/tabletofile.html
new file mode 100644
index 0000000..072b993
--- /dev/null
+++ b/1.8/examples/tabletofile.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/tabletofile.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/tabletofile.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/tabletofile.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/tabletofile.html"</script>
+</html>
diff --git a/1.8/examples/tabletofile.md b/1.8/examples/tabletofile.md
deleted file mode 100644
index 08b7cc9..0000000
--- a/1.8/examples/tabletofile.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Title: Apache Accumulo Table-to-File Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses map/reduce to extract specified columns from an existing table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into Accumulo using the Accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dog cf cq dogvalue
-    username@instance> insert cat cf cq catvalue
-    username@instance> insert junk family qualifier junkvalue
-    username@instance> quit
-
-The TableToFile class configures a map-only job to read the specified columns and
-write the key/value pairs to a file in HDFS.
-
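-The sketch below shows the input-side configuration such a job uses.  It is an
-illustration, not the TableToFile source; the instance, user, and table names
-are the placeholders used throughout this example.
-
-    import java.util.Collections;
-    import org.apache.accumulo.core.client.ClientConfiguration;
-    import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-    import org.apache.accumulo.core.util.Pair;
-    import org.apache.hadoop.io.Text;
-    import org.apache.hadoop.mapreduce.Job;
-
-    Job job = Job.getInstance();
-    job.setInputFormatClass(AccumuloInputFormat.class);
-    AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("passwd"));
-    AccumuloInputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault()
-        .withInstance("instance").withZkHosts("zookeepers"));
-    AccumuloInputFormat.setInputTableName(job, "input");
-    AccumuloInputFormat.fetchColumns(job,
-        Collections.singleton(new Pair<Text,Text>(new Text("cf"), new Text("cq"))));
-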
-The following will extract the rows containing the column "cf:cq":
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs/history
-    -rw-r--r--   1 username supergroup       9049 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_1357847072863_username_TableToFile%5F1357847071434
-    -rw-r--r--   1 username supergroup      26172 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_conf.xml
-    -rw-r--r--   1 username supergroup         50 2013-01-10 14:44 /tmp/output/part-m-00000
-
-We can see the output of our map-only job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    cat cf:cq []	catvalue
-    dog cf:cq []	dogvalue
-    $
-
diff --git a/1.8/examples/terasort.html b/1.8/examples/terasort.html
new file mode 100644
index 0000000..dc2a76c
--- /dev/null
+++ b/1.8/examples/terasort.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/terasort.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/terasort.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/terasort.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/terasort.html"</script>
+</html>
diff --git a/1.8/examples/terasort.md b/1.8/examples/terasort.md
deleted file mode 100644
index 409c1d1..0000000
--- a/1.8/examples/terasort.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Title: Apache Accumulo Terasort Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses map/reduce to generate random input data that will
-be sorted by storing it into Accumulo. It uses data very similar to the
-Hadoop terasort benchmark.
-
-To run this example, supply arguments describing the amount of data:
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
-    -i instance -z zookeepers -u user -p password \
-    --count 10 \
-    --minKeySize 10 \
-    --maxKeySize 10 \
-    --minValueSize 78 \
-    --maxValueSize 78 \
-    --table sort \
-    --splits 10
-
-After the map/reduce job completes, scan the data:
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> scan -t sort
-    +l-$$OE/ZH c:         4 []    GGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOO
-    ,C)wDw//u= c:        10 []    CCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKK
-    75@~?'WdUF c:         1 []    IIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQ
-    ;L+!2rT~hd c:         8 []    MMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUU
-    LsS8)|.ZLD c:         5 []    OOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWW
-    M^*dDE;6^< c:         9 []    UUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCC
-    ^Eu)<n#kdP c:         3 []    YYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGG
-    le5awB.$sm c:         6 []    WWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEE
-    q__[fwhKFg c:         7 []    EEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMM
-    w[o||:N&H, c:         2 []    QQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYY
-
-Of course, a real benchmark would ingest millions of entries.
diff --git a/1.8/examples/visibility.html b/1.8/examples/visibility.html
new file mode 100644
index 0000000..745c6e9
--- /dev/null
+++ b/1.8/examples/visibility.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en-US">
+  <meta charset="utf-8">
+  <title>Redirecting&hellip;</title>
+  <link rel="canonical" href="https://accumulo.apache.org/1.9/examples/visibility.html">
+  <meta http-equiv="refresh" content="0; url=https://accumulo.apache.org/1.9/examples/visibility.html">
+  <meta name="robots" content="noindex">
+  <h1>Redirecting&hellip;</h1>
+  <a href="https://accumulo.apache.org/1.9/examples/visibility.html">Click here if you are not redirected.</a>
+  <script>location="https://accumulo.apache.org/1.9/examples/visibility.html"</script>
+</html>
diff --git a/1.8/examples/visibility.md b/1.8/examples/visibility.md
deleted file mode 100644
index b766dba..0000000
--- a/1.8/examples/visibility.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Title: Apache Accumulo Visibility, Authorizations, and Permissions Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-## Creating a new user
-
-    root@instance> createuser username
-    Enter new password for 'username': ********
-    Please confirm new password for 'username': ********
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    06 10:48:47,931 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance> userpermissions
-    System permissions:
-
-    Table permissions (accumulo.metadata): Table.READ
-    username@instance>
-
-By default, a user does not have permission to create a table.
-
-## Granting permissions to a user
-
-    username@instance> user root
-    Enter password for user root: ********
-    root@instance> grant -s System.CREATE_TABLE -u username
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    username@instance> userpermissions
-    System permissions: System.CREATE_TABLE
-
-    Table permissions (accumulo.metadata): Table.READ
-    Table permissions (vistest): Table.READ, Table.WRITE, Table.BULK_IMPORT, Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE
-    username@instance vistest>
-
-## Inserting data with visibilities
-
-Visibilities are boolean AND (&) and OR (|) combinations of authorization
-tokens. Authorization tokens are arbitrary strings taken from a restricted
-ASCII character set. Parentheses are required to specify order of operations
-in visibilities.
-
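-From Java, the same expressions are attached to each entry with
-ColumnVisibility, which parses and validates them.  A minimal sketch (the
-mutation would then be handed to a BatchWriter):
-
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.security.ColumnVisibility;
-
-    Mutation m = new Mutation("row");
-    m.put("f3", "q3", new ColumnVisibility("(apple&carrot)|broccoli|spinach"), "v3");
-
-In the shell, visibilities are supplied with the -l option:
-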
-    username@instance vistest> insert row f1 q1 v1 -l A
-    username@instance vistest> insert row f2 q2 v2 -l A&B
-    username@instance vistest> insert row f3 q3 v3 -l apple&carrot|broccoli|spinach
-    06 11:19:01,432 [shell.Shell] ERROR: org.apache.accumulo.core.util.BadArgumentException: cannot mix | and & near index 12
-    apple&carrot|broccoli|spinach
-                ^
-    username@instance vistest> insert row f3 q3 v3 -l (apple&carrot)|broccoli|spinach
-    username@instance vistest>
-
-## Scanning with authorizations
-
-Authorizations are sets of authorization tokens. Each Accumulo user has
-authorizations and each Accumulo scan has authorizations. Scan authorizations
-are only allowed to be a subset of the user's authorizations. By default, a
-user's authorizations set is empty.
-
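-From Java, scan authorizations are passed explicitly when creating a scanner.
-A minimal sketch, assuming a Connector named conn:
-
-    import org.apache.accumulo.core.client.Scanner;
-    import org.apache.accumulo.core.security.Authorizations;
-
-    Scanner scanner = conn.createScanner("vistest", new Authorizations("A"));
-
-The shell transcript below shows scans with and without the -s option:
-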
-    username@instance vistest> scan
-    username@instance vistest> scan -s A
-    06 11:43:14,951 [shell.Shell] ERROR: java.lang.RuntimeException: org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_AUTHORIZATIONS - The user does not have the specified authorizations assigned
-    username@instance vistest>
-
-## Setting authorizations for a user
-
-    username@instance vistest> setauths -s A
-    06 11:53:42,056 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance vistest>
-
-A user cannot set authorizations unless the user has the System.ALTER_USER permission.
-The root user has this permission.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan -s A
-    row f1:q1 [A]    v1
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    username@instance vistest>
-
-The default authorizations for a scan are the user's entire set of authorizations.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A,B,broccoli -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    username@instance vistest> scan -s B
-    username@instance vistest>
-
-If you want, you can limit a user to inserting only data which they themselves can read.
-This is done by setting the following constraint.
-
-    username@instance vistest> user root
-    Enter password for user root: ******
-    root@instance vistest> config -t vistest -s table.constraint.1=org.apache.accumulo.core.security.VisibilityConstraint
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> insert row f4 q4 v4 -l spinach
-        Constraint Failures:
-            ConstraintViolationSummary(constrainClass:org.apache.accumulo.core.security.VisibilityConstraint, violationCode:2, violationDescription:User does not have authorization on column visibility, numberOfViolatingMutations:1)
-    username@instance vistest> insert row f4 q4 v4 -l spinach|broccoli
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    row f4:q4 [spinach|broccoli]    v4
-    username@instance vistest>
-
diff --git a/1.8/examples/index.html b/1.9/examples/batch.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/batch.html
index 3af64b6..3d0707c 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/batch.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Batch Writing and Scanning Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,49 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Batch Writing and Scanning Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
+          <p>This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:</p>
 
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
+<ul>
+  <li>SequentialBatchWriter.java - writes mutations with sequential rows and random values</li>
+  <li>RandomBatchWriter.java - used by SequentialBatchWriter to generate random values</li>
+  <li>RandomBatchScanner.java - reads random rows and verifies their values</li>
+</ul>
 
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
+<p>This is an example of how to use the batch writer and batch scanner. To compile
+the example, run Maven and copy the produced jar into the Accumulo lib dir.
+This is already done in the tar distribution.</p>
 
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
+<p>Below are commands that add 10000 entries to Accumulo and then do 100 random
+queries. The write command generates random 50 byte values.</p>
 
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
+<p>Be sure to use the name of your instance (given as instance here) and the appropriate
+list of zookeeper nodes (given as zookeepers here).</p>
 
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
+<p>Before you run this, you must ensure that the user you are running has the
+“exampleVis” authorization. (You can set this in the shell with “setauths -u username -s exampleVis”.)</p>
 
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u root -e "setauths -u username -s exampleVis"
+</code></pre></div></div>
 
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
+<p>You must also create the table, batchtest1, ahead of time. (In the shell, use “createtable batchtest1”)</p>
 
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -e "createtable batchtest1"
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance -z zookeepers -u username -p password -t batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -i instance -z zookeepers -u username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --auths exampleVis
+07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
+07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
+07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 lookups/sec   0.14 secs
 
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
+07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : num results : 100
 
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+07 11:33:11,364 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
+07 11:33:11,370 [client.CountingVerifyingReceiver] INFO : finished
+07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : 2173.91 lookups/sec   0.05 secs
 
+07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : num results : 100
+</code></pre></div></div>
 
         </div>
 
diff --git a/1.9/examples/batch.md b/1.9/examples/batch.md
deleted file mode 100644
index 463481b..0000000
--- a/1.9/examples/batch.md
+++ /dev/null
@@ -1,55 +0,0 @@
-Title: Apache Accumulo Batch Writing and Scanning Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * SequentialBatchWriter.java - writes mutations with sequential rows and random values
- * RandomBatchWriter.java - used by SequentialBatchWriter to generate random values
- * RandomBatchScanner.java - reads random rows and verifies their values
-
-This is an example of how to use the batch writer and batch scanner. To compile
-the example, run Maven and copy the produced jar into the Accumulo lib dir.
-This is already done in the tar distribution.
-
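-Before looking at the command-line drivers, here is a minimal sketch of the
-underlying write/read cycle using the client API.  It is an illustration only,
-assuming a Connector named conn has already been created.
-
-    import java.util.Collections;
-    import java.util.Map.Entry;
-    import org.apache.accumulo.core.client.BatchScanner;
-    import org.apache.accumulo.core.client.BatchWriter;
-    import org.apache.accumulo.core.client.BatchWriterConfig;
-    import org.apache.accumulo.core.data.Key;
-    import org.apache.accumulo.core.data.Mutation;
-    import org.apache.accumulo.core.data.Range;
-    import org.apache.accumulo.core.data.Value;
-    import org.apache.accumulo.core.security.Authorizations;
-    import org.apache.accumulo.core.security.ColumnVisibility;
-
-    // write one entry with the exampleVis visibility
-    BatchWriter bw = conn.createBatchWriter("batchtest1", new BatchWriterConfig());
-    Mutation m = new Mutation("row_0");
-    m.put("foo", "1", new ColumnVisibility("exampleVis"), "value_0");
-    bw.addMutation(m);
-    bw.close();
-
-    // read it back with a BatchScanner using 20 query threads
-    BatchScanner bs = conn.createBatchScanner("batchtest1",
-        new Authorizations("exampleVis"), 20);
-    bs.setRanges(Collections.singleton(Range.exact("row_0")));
-    for (Entry<Key,Value> e : bs)
-      System.out.println(e.getKey() + " -> " + e.getValue());
-    bs.close();
-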
-Below are commands that add 10000 entries to Accumulo and then do 100 random
-queries. The write command generates random 50 byte values.
-
-Be sure to use the name of your instance (given as instance here) and the appropriate
-list of zookeeper nodes (given as zookeepers here).
-
-Before you run this, you must ensure that the user you are running has the
-"exampleVis" authorization. (you can set this in the shell with "setauths -u username -s exampleVis")
-
-    $ ./bin/accumulo shell -u root -e "setauths -u username -s exampleVis"
-
-You must also create the table, batchtest1, ahead of time. (In the shell, use "createtable batchtest1")
-
-    $ ./bin/accumulo shell -u username -e "createtable batchtest1"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.SequentialBatchWriter -i instance -z zookeepers -u username -p password -t batchtest1 --start 0 --num 10000 --size 50 --batchMemory 20M --batchLatency 500 --batchThreads 20 --vis exampleVis
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner -i instance -z zookeepers -u username -p password -t batchtest1 --num 100 --min 0 --max 10000 --size 50 --scanThreads 20 --auths exampleVis
-    07 11:33:11,103 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,112 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : 694.44 lookups/sec   0.14 secs
-
-    07 11:33:11,260 [client.CountingVerifyingReceiver] INFO : num results : 100
-
-    07 11:33:11,364 [client.CountingVerifyingReceiver] INFO : Generating 100 random queries...
-    07 11:33:11,370 [client.CountingVerifyingReceiver] INFO : finished
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : 2173.91 lookups/sec   0.05 secs
-
-    07 11:33:11,416 [client.CountingVerifyingReceiver] INFO : num results : 100
diff --git a/1.9/examples/bloom.html b/1.9/examples/bloom.html
new file mode 100644
index 0000000..0c668cd
--- /dev/null
+++ b/1.9/examples/bloom.html
@@ -0,0 +1,378 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<meta charset="utf-8">
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link href="https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/paper/bootstrap.min.css" rel="stylesheet" integrity="sha384-awusxf8AUojygHf2+joICySzB780jVvQaVCAt1clU3QsyAitLGul28Qxb2r1e5g+" crossorigin="anonymous">
+<link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
+<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
+<link href="/css/accumulo.css" rel="stylesheet" type="text/css">
+
+<title>Apache Accumulo Bloom Filter Example</title>
+
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
+<script type="text/javascript" src="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.js"></script>
+<script>
+  // show location of canonical site if not currently on the canonical site
+  $(function() {
+    var host = window.location.host;
+    if (typeof host !== 'undefined' && host !== 'accumulo.apache.org') {
+      $('#non-canonical').show();
+    }
+  });
+
+  $(function() {
+    // decorate section headers with anchors
+    return $("h2, h3, h4, h5, h6").each(function(i, el) {
+      var $el, icon, id;
+      $el = $(el);
+      id = $el.attr('id');
+      icon = '<i class="fa fa-link"></i>';
+      if (id) {
+        return $el.append($("<a />").addClass("header-link").attr("href", "#" + id).html(icon));
+      }
+    });
+  });
+
+  // fix sidebar width in documentation
+  $(function() {
+    var $affixElement = $('div[data-spy="affix"]');
+    $affixElement.width($affixElement.parent().width());
+  });
+</script>
+
+</head>
+<body style="padding-top: 100px">
+
+  <nav class="navbar navbar-default navbar-fixed-top">
+  <div class="container">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#navbar-items">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a href="/"><img id="nav-logo" alt="Apache Accumulo" class="img-responsive" src="/images/accumulo-logo.png" width="200"
+        /></a>
+    </div>
+    <div class="collapse navbar-collapse" id="navbar-items">
+      <ul class="nav navbar-nav">
+        <li class="nav-link"><a href="/downloads">Download</a></li>
+        <li class="nav-link"><a href="/tour">Tour</a></li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Releases<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/release/accumulo-2.0.0-alpha-2/">2.0.0-alpha-2 (Preview Release)</a></li>
+            <li><a href="/release/accumulo-1.9.3/">1.9.3 (Latest)</a></li>
+            <li><a href="/release/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Documentation<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/docs/2.x/getting-started/quickstart">User Manual (2.x)</a></li>
+            <li><a href="/quickstart-1.x">Quickstart (1.x)</a></li>
+            <li><a href="/1.9/accumulo_user_manual.html">User Manual (1.9)</a></li>
+            <li><a href="/1.9/apidocs">Javadocs (1.9)</a></li>
+            <li><a href="/external-docs">External Docs</a></li>
+            <li><a href="/docs-archive/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Community<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/contact-us">Contact Us</a></li>
+            <li><a href="/how-to-contribute">How To Contribute</a></li>
+            <li><a href="/people">People</a></li>
+            <li><a href="/related-projects">Related Projects</a></li>
+          </ul>
+        </li>
+        <li class="nav-link"><a href="/search">Search</a></li>
+      </ul>
+      <ul class="nav navbar-nav navbar-right">
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#"><img alt="Apache Software Foundation" src="https://www.apache.org/foundation/press/kit/feather.svg" width="15"/><span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="https://www.apache.org">Apache Homepage <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/licenses/">License <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/sponsorship">Sponsorship <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/security">Security <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/thanks">Thanks <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/policies/conduct">Code of Conduct <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/events/current-event.html">Current Event <i class="fa fa-external-link"></i></a></li>
+          </ul>
+        </li>
+      </ul>
+    </div>
+  </div>
+</nav>
+
+
+  <div class="container">
+    <div class="row">
+      <div class="col-md-12">
+
+        <div id="non-canonical" style="display: none; background-color: #F0E68C; padding-left: 1em;">
+          Visit the official site at: <a href="https://accumulo.apache.org">https://accumulo.apache.org</a>
+        </div>
+        <div id="content">
+          
+          <h1 class="title">Apache Accumulo Bloom Filter Example</h1>
+          
+          <p>This example shows how to create a table with bloom filters enabled.  It also
+shows how bloom filters increase query performance when looking for values that
+do not exist in a table.</p>
+
+<p>Below, a table named bloom_test is created and bloom filters are enabled.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; setauths -u username -s exampleVis
+username@instance&gt; createtable bloom_test
+username@instance bloom_test&gt; config -t bloom_test -s table.bloom.enabled=true
+username@instance bloom_test&gt; exit
+</code></pre></div></div>
+
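+<p>The same property can also be set from Java through TableOperations. This is
+a minimal sketch, not part of the example code, and it assumes a Connector
+named conn has already been created:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// enable bloom filters on bloom_test via the Java API
+conn.tableOperations().setProperty("bloom_test", "table.bloom.enabled", "true");
+</code></pre></div></div>
+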
+<p>Below, 1 million random values are inserted into Accumulo. The randomly
+generated rows range between 0 and 1 billion. The random number generator is
+initialized with the seed 7.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
+</code></pre></div></div>
+
+<p>Below, the table is flushed:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test -w'
+05 10:40:06,069 [shell.Shell] INFO : Flush of table bloom_test completed.
+</code></pre></div></div>
+
+<p>After the flush completes, 500 random queries are done against the table. The
+same seed is used to generate the queries, therefore everything is found in the
+table.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
+Generating 500 random queries...finished
+96.19 lookups/sec   5.20 secs
+num results : 500
+Generating 500 random queries...finished
+102.35 lookups/sec   4.89 secs
+num results : 500
+</code></pre></div></div>
+
+<p>Below, another 500 queries are performed using a different seed, which results
+in nothing being found. In this case the lookups are much faster because of
+the bloom filters.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 8 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 -batchThreads 20 -auths exampleVis
+Generating 500 random queries...finished
+2212.39 lookups/sec   0.23 secs
+num results : 0
+Did not find 500 rows
+Generating 500 random queries...finished
+4464.29 lookups/sec   0.11 secs
+num results : 0
+Did not find 500 rows
+</code></pre></div></div>
+
+<hr />
+
+<p>Bloom filters can also speed up lookups for entries that exist. In Accumulo,
+data is divided into tablets and each tablet has multiple map files. Every
+lookup in Accumulo goes to a specific tablet, where a lookup is done on each
+map file in the tablet. So if a tablet has three map files, lookup performance
+can be three times slower than for a tablet with one map file. However, if the
+map files contain unique sets of data, then bloom filters can help eliminate
+map files that do not contain the row being looked up. To illustrate this, two
+otherwise identical tables were created using the following process. One table
+had bloom filters; the other did not. Also, the major compaction ratio was
+increased to prevent the files from being compacted into one file.</p>
+
+<ul>
+  <li>Insert 1 million entries using  RandomBatchWriter with a seed of 7</li>
+  <li>Flush the table using the shell</li>
+  <li>Insert 1 million entries using  RandomBatchWriter with a seed of 8</li>
+  <li>Flush the table using the shell</li>
+  <li>Insert 1 million entries using  RandomBatchWriter with a seed of 9</li>
+  <li>Flush the table using the shell</li>
+</ul>
+
+<p>After following the above steps, each table will have a tablet with three map
+files. Flushing the table after each batch of inserts will create a map file.
+Each map file will contain 1 million entries generated with a different seed.
+This is assuming that Accumulo is configured with enough memory to hold 1
+million inserts. If not, then more map files will be created.</p>
+
+<p>The commands for creating the first table without bloom filters are below.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; setauths -u username -s exampleVis
+username@instance&gt; createtable bloom_test1
+username@instance bloom_test1&gt; config -t bloom_test1 -s table.compaction.major.ratio=7
+username@instance bloom_test1&gt; exit
+
+$ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
+</code></pre></div></div>
+
+<p>The commands for creating the second table with bloom filters are below.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; setauths -u username -s exampleVis
+username@instance&gt; createtable bloom_test2
+username@instance bloom_test2&gt; config -t bloom_test2 -s table.compaction.major.ratio=7
+username@instance bloom_test2&gt; config -t bloom_test2 -s table.bloom.enabled=true
+username@instance bloom_test2&gt; exit
+
+$ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
+$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
+$ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
+</code></pre></div></div>
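+
+<p>The same table setup can be done through the Java API. Below is a hedged sketch of the
+shell commands above, using the same property names; the connection details are placeholders.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+
+public class BloomTableSetupSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeepers")
+        .getConnector("username", new PasswordToken("password"));
+    TableOperations ops = conn.tableOperations();
+    ops.create("bloom_test2");
+    // Raise the compaction ratio so the three flushed files are not
+    // merged into one, then enable bloom filters on the table.
+    ops.setProperty("bloom_test2", "table.compaction.major.ratio", "7");
+    ops.setProperty("bloom_test2", "table.bloom.enabled", "true");
+  }
+}
+</code></pre></div></div>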
+
+<p>Below 500 lookups are done against the table without bloom filters using
+random number generator seed 7. Even though only one map file will likely
+contain entries for this seed, all map files will be interrogated.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
+Generating 500 random queries...finished
+35.09 lookups/sec  14.25 secs
+num results : 500
+Generating 500 random queries...finished
+35.33 lookups/sec  14.15 secs
+num results : 500
+</code></pre></div></div>
+
+<p>Below the same lookups are done against the table with bloom filters. The
+lookups were 2.86 times faster because only one map file was used, even though three
+map files existed.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
+Generating 500 random queries...finished
+99.03 lookups/sec   5.05 secs
+num results : 500
+Generating 500 random queries...finished
+101.15 lookups/sec   4.94 secs
+num results : 500
+</code></pre></div></div>
+
+<p>You can verify the table has three files by looking in HDFS. To look in HDFS,
+you will need the table ID, because the ID, not the table name, is used in
+HDFS. The following command will show table IDs.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password -e 'tables -l'
+accumulo.metadata    =&gt;        !0
+accumulo.root        =&gt;        +r
+bloom_test1          =&gt;        o7
+bloom_test2          =&gt;        o8
+trace                =&gt;         1
+</code></pre></div></div>
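+
+<p>The same name-to-ID mapping is available programmatically. A minimal sketch, again with
+placeholder connection settings:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+
+public class TableIdSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeepers")
+        .getConnector("username", new PasswordToken("password"));
+    // tableIdMap returns table name -&gt; table ID, the same
+    // mapping that 'tables -l' prints in the shell.
+    for (Entry&lt;String,String&gt; e : conn.tableOperations().tableIdMap().entrySet())
+      System.out.println(e.getKey() + " =&gt; " + e.getValue());
+  }
+}
+</code></pre></div></div>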
+
+<p>So the table ID for bloom_test2 is o8. The command below shows what files this
+table has in HDFS. This assumes Accumulo is at the default location in HDFS.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -lsr /accumulo/tables/o8
+drwxr-xr-x   - username supergroup          0 2012-01-10 14:02 /accumulo/tables/o8/default_tablet
+-rw-r--r--   3 username supergroup   52672650 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dj.rf
+-rw-r--r--   3 username supergroup   52436176 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dk.rf
+-rw-r--r--   3 username supergroup   52850173 2012-01-10 14:02 /accumulo/tables/o8/default_tablet/F00000dl.rf
+</code></pre></div></div>
+
+<p>Running the rfile-info command shows that one of the files has a bloom filter
+and that it is 1.5MB.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo rfile-info /accumulo/tables/o8/default_tablet/F00000dj.rf
+Locality group         : &lt;DEFAULT&gt;
+Start block          : 0
+Num   blocks         : 752
+Index level 0        : 43,598 bytes  1 blocks
+First key            : row_0000001169 foo:1 [exampleVis] 1326222052539 false
+Last key             : row_0999999421 foo:1 [exampleVis] 1326222052058 false
+Num entries          : 999,536
+Column families      : [foo]
+
+Meta block     : BCFile.index
+  Raw size             : 4 bytes
+  Compressed size      : 12 bytes
+  Compression type     : gz
+
+Meta block     : RFile.index
+  Raw size             : 43,696 bytes
+  Compressed size      : 15,592 bytes
+  Compression type     : gz
+
+Meta block     : acu_bloom
+  Raw size             : 1,540,292 bytes
+  Compressed size      : 1,433,115 bytes
+  Compression type     : gz
+</code></pre></div></div>
+
+
+        </div>
+
+        
+<footer>
+
+  <p><a href="https://www.apache.org/foundation/contributing"><img src="https://www.apache.org/images/SupportApache-small.png" alt="Support the ASF" id="asf-logo" height="100" /></a></p>
+
+  <p>Copyright © 2011-2019 <a href="https://www.apache.org">The Apache Software Foundation</a>.
+Licensed under the <a href="https://www.apache.org/licenses/">Apache License, Version 2.0</a>.</p>
+
+  <p>Apache®, the names of Apache projects and their logos, and the multicolor feather
+logo are registered trademarks or trademarks of The Apache Software Foundation
+in the United States and/or other countries.</p>
+
+</footer>
+
+
+      </div>
+    </div>
+  </div>
+</body>
+</html>
diff --git a/1.9/examples/bloom.md b/1.9/examples/bloom.md
deleted file mode 100644
index 555f06d..0000000
--- a/1.9/examples/bloom.md
+++ /dev/null
@@ -1,219 +0,0 @@
-Title: Apache Accumulo Bloom Filter Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows how to create a table with bloom filters enabled.  It also
-shows how bloom filters increase query performance when looking for values that
-do not exist in a table.
-
-Below table named bloom_test is created and bloom filters are enabled.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test
-    username@instance bloom_test> config -t bloom_test -s table.bloom.enabled=true
-    username@instance bloom_test> exit
-
-Below 1 million random values are inserted into accumulo. The randomly
-generated rows range between 0 and 1 billion. The random number generator is
-initialized with the seed 7.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis
-
-Below the table is flushed:
-
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test -w'
-    05 10:40:06,069 [shell.Shell] INFO : Flush of table bloom_test completed.
-
-After the flush completes, 500 random queries are done against the table. The
-same seed is used to generate the queries, therefore everything is found in the
-table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    96.19 lookups/sec   5.20 secs
-    num results : 500
-    Generating 500 random queries...finished
-    102.35 lookups/sec   4.89 secs
-    num results : 500
-
-Below another 500 queries are performed, using a different seed which results
-in nothing being found. In this case the lookups are much faster because of
-the bloom filters.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 8 -i instance -z zookeepers -u username -p password -t bloom_test --num 500 --min 0 --max 1000000000 --size 50 -batchThreads 20 -auths exampleVis
-    Generating 500 random queries...finished
-    2212.39 lookups/sec   0.23 secs
-    num results : 0
-    Did not find 500 rows
-    Generating 500 random queries...finished
-    4464.29 lookups/sec   0.11 secs
-    num results : 0
-    Did not find 500 rows
-
-********************************************************************************
-
-Bloom filters can also speed up lookups for entries that exist. In accumulo
-data is divided into tablets and each tablet has multiple map files. Every
-lookup in accumulo goes to a specific tablet where a lookup is done on each
-map file in the tablet. So if a tablet has three map files, lookup performance
-can be three times slower than a tablet with one map file. However if the map
-files contain unique sets of data, then bloom filters can help eliminate map
-files that do not contain the row being looked up. To illustrate this two
-identical tables were created using the following process. One table had bloom
-filters, the other did not. Also the major compaction ratio was increased to
-prevent the files from being compacted into one file.
-
- * Insert 1 million entries using  RandomBatchWriter with a seed of 7
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 8
- * Flush the table using the shell
- * Insert 1 million entries using  RandomBatchWriter with a seed of 9
- * Flush the table using the shell
-
-After following the above steps, each table will have a tablet with three map
-files. Flushing the table after each batch of inserts will create a map file.
-Each map file will contain 1 million entries generated with a different seed.
-This is assuming that Accumulo is configured with enough memory to hold 1
-million inserts. If not, then more map files will be created.
-
-The commands for creating the first table without bloom filters are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test1
-    username@instance bloom_test1> config -t bloom_test1 -s table.compaction.major.ratio=7
-    username@instance bloom_test1> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test1 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test1 -w'
-
-The commands for creating the second table with bloom filers are below.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> setauths -u username -s exampleVis
-    username@instance> createtable bloom_test2
-    username@instance bloom_test2> config -t bloom_test2 -s table.compaction.major.ratio=7
-    username@instance bloom_test2> config -t bloom_test2 -s table.bloom.enabled=true
-    username@instance bloom_test2> exit
-
-    $ ARGS="-i instance -z zookeepers -u username -p password -t bloom_test2 --num 1000000 --min 0 --max 1000000000 --size 50 --batchMemory 2M --batchLatency 60s --batchThreads 3 --vis exampleVis"
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 7 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 8 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchWriter --seed 9 $ARGS
-    $ ./bin/accumulo shell -u username -p password -e 'flush -t bloom_test2 -w'
-
-Below 500 lookups are done against the table without bloom filters using random
-NG seed 7. Even though only one map file will likely contain entries for this
-seed, all map files will be interrogated.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test1 --num 500 --min 0 --max 1000000000 --size 50 --scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    35.09 lookups/sec  14.25 secs
-    num results : 500
-    Generating 500 random queries...finished
-    35.33 lookups/sec  14.15 secs
-    num results : 500
-
-Below the same lookups are done against the table with bloom filters. The
-lookups were 2.86 times faster because only one map file was used, even though three
-map files existed.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.client.RandomBatchScanner --seed 7 -i instance -z zookeepers -u username -p password -t bloom_test2 --num 500 --min 0 --max 1000000000 --size 50 -scanThreads 20 --auths exampleVis
-    Generating 500 random queries...finished
-    99.03 lookups/sec   5.05 secs
-    num results : 500
-    Generating 500 random queries...finished
-    101.15 lookups/sec   4.94 secs
-    num results : 500
-
-You can verify the table has three files by looking in HDFS. To look in HDFS
-you will need the table ID, because this is used in HDFS instead of the table
-name. The following command will show table ids.
-
-    $ ./bin/accumulo shell -u username -p password -e 'tables -l'
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    bloom_test1          =>        o7
-    bloom_test2          =>        o8
-    trace                =>         1
-
-So the table id for bloom_test2 is o8. The command below shows what files this
-table has in HDFS. This assumes Accumulo is at the default location in HDFS.
-
-    $ hadoop fs -lsr /accumulo/tables/o8
-    drwxr-xr-x   - username supergroup          0 2012-01-10 14:02 /accumulo/tables/o8/default_tablet
-    -rw-r--r--   3 username supergroup   52672650 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dj.rf
-    -rw-r--r--   3 username supergroup   52436176 2012-01-10 14:01 /accumulo/tables/o8/default_tablet/F00000dk.rf
-    -rw-r--r--   3 username supergroup   52850173 2012-01-10 14:02 /accumulo/tables/o8/default_tablet/F00000dl.rf
-
-Running the rfile-info command shows that one of the files has a bloom filter
-and its 1.5MB.
-
-    $ ./bin/accumulo rfile-info /accumulo/tables/o8/default_tablet/F00000dj.rf
-    Locality group         : <DEFAULT>
-	Start block          : 0
-	Num   blocks         : 752
-	Index level 0        : 43,598 bytes  1 blocks
-	First key            : row_0000001169 foo:1 [exampleVis] 1326222052539 false
-	Last key             : row_0999999421 foo:1 [exampleVis] 1326222052058 false
-	Num entries          : 999,536
-	Column families      : [foo]
-
-    Meta block     : BCFile.index
-      Raw size             : 4 bytes
-      Compressed size      : 12 bytes
-      Compression type     : gz
-
-    Meta block     : RFile.index
-      Raw size             : 43,696 bytes
-      Compressed size      : 15,592 bytes
-      Compression type     : gz
-
-    Meta block     : acu_bloom
-      Raw size             : 1,540,292 bytes
-      Compressed size      : 1,433,115 bytes
-      Compression type     : gz
-
diff --git a/1.8/examples/index.html b/1.9/examples/bulkIngest.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/bulkIngest.html
index 3af64b6..ece2de7 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/bulkIngest.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Bulk Ingest Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,24 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Bulk Ingest Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>This is an example of how to bulk ingest data into Accumulo using MapReduce.</p>
+
+<p>The following commands show how to run this example. This example creates a
+table called test_bulk which has two initial split points. Then 1000 rows of
+test data are created in HDFS. After that, the 1000 rows are ingested into
+Accumulo. Finally, we verify that the 1000 rows are in Accumulo.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ PKG=org.apache.accumulo.examples.simple.mapreduce.bulk
+$ ARGS="-i instance -z zookeepers -u username -p password"
+$ ./bin/accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
+$ ./bin/accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
+$ ./bin/tool.sh lib/accumulo-examples-simple.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
+$ ./bin/accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
+</code></pre></div></div>
+
+<p>For a high-level discussion of bulk ingest, see the docs dir.</p>
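+
+<p>At its core, the final bulk step hands a directory of RFiles to the tablet servers. A
+minimal sketch of that call is below; the files and failures directory names are assumptions
+based on the --workDir argument above, and the connection settings are placeholders.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+
+public class BulkImportSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeepers")
+        .getConnector("username", new PasswordToken("password"));
+    // Assign the RFiles under the first directory to tablets. Any
+    // rejected files are moved to the (empty, pre-existing) failures
+    // directory; false keeps the timestamps stored in the files.
+    conn.tableOperations().importDirectory("test_bulk",
+        "tmp/bulkWork/files", "tmp/bulkWork/failures", false);
+  }
+}
+</code></pre></div></div>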
 
         </div>
 
diff --git a/1.9/examples/bulkIngest.md b/1.9/examples/bulkIngest.md
deleted file mode 100644
index e07dc9b..0000000
--- a/1.9/examples/bulkIngest.md
+++ /dev/null
@@ -1,33 +0,0 @@
-Title: Apache Accumulo Bulk Ingest Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This is an example of how to bulk ingest data into accumulo using map reduce.
-
-The following commands show how to run this example. This example creates a
-table called test_bulk which has two initial split points. Then 1000 rows of
-test data are created in HDFS. After that the 1000 rows are ingested into
-accumulo. Then we verify the 1000 rows are in accumulo.
-
-    $ PKG=org.apache.accumulo.examples.simple.mapreduce.bulk
-    $ ARGS="-i instance -z zookeepers -u username -p password"
-    $ ./bin/accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
-    $ ./bin/accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
-    $ ./bin/tool.sh lib/accumulo-examples-simple.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
-    $ ./bin/accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
-
-For a high level discussion of bulk ingest, see the docs dir.
diff --git a/1.8/examples/index.html b/1.9/examples/classpath.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/classpath.html
index 3af64b6..87d1295 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/classpath.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Classpath Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,61 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Classpath Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
+          <p>This example shows how to use per-table classpaths. The example leverages a
+test jar which contains a Filter that suppresses rows containing “foo”. The
+example shows copying FooFilter.jar into HDFS and then configuring an Accumulo
+table to reference that jar.</p>
 
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
+<p>Execute the following command in bash to copy the jar into HDFS.</p>
 
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -copyFromLocal $ACCUMULO_HOME/test/src/test/resources/FooFilter.jar /user1/lib
+</code></pre></div></div>
 
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
+<p>Execute the following in the Accumulo shell to set up the classpath context.</p>
 
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15&gt; config -s general.vfs.context.classpath.cx1=hdfs://&lt;namenode host&gt;:&lt;namenode port&gt;/user1/lib/[^.].*.jar
+</code></pre></div></div>
 
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
+<p>Create a table.</p>
 
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15&gt; createtable nofoo
+</code></pre></div></div>
 
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
+<p>The following command makes this table use the configured classpath context.</p>
 
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15 nofoo&gt; config -t nofoo -s table.classpath.context=cx1
+</code></pre></div></div>
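+
+<p>Both properties can also be set through the Java API. The sketch below mirrors the two
+config commands; the namenode host and port are placeholders to fill in, and the credentials
+are assumed.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+
+public class ClasspathContextSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("test15", "zookeepers")
+        .getConnector("root", new PasswordToken("secret"));
+    // Instance-wide: define the classpath context, as in
+    // 'config -s general.vfs.context.classpath.cx1=...'
+    conn.instanceOperations().setProperty(
+        "general.vfs.context.classpath.cx1",
+        "hdfs://namenode:8020/user1/lib/[^.].*.jar");
+    // Per-table: point the table at that context, as in
+    // 'config -t nofoo -s table.classpath.context=cx1'
+    conn.tableOperations().setProperty("nofoo",
+        "table.classpath.context", "cx1");
+  }
+}
+</code></pre></div></div>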
 
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
+<p>The following command configures an iterator that is in FooFilter.jar.</p>
 
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15 nofoo&gt; setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
+Filter accepts or rejects each Key/Value pair
+----------&gt; set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
+</code></pre></div></div>
 
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
+<p>The commands below show that the filter is working.</p>
 
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15 nofoo&gt; insert foo1 f1 q1 v1
+root@test15 nofoo&gt; insert noo1 f1 q1 v2
+root@test15 nofoo&gt; scan
+noo1 f1:q1 []    v2
+root@test15 nofoo&gt;
+</code></pre></div></div>
 
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
+<p>Below, an attempt is made to add the FooFilter to a table that is not configured
+to use the classpath context cx1. This fails until the table is configured to
+use cx1.</p>
 
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15 nofoo&gt; createtable nofootwo
+root@test15 nofootwo&gt; setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
+2013-05-03 12:49:35,943 [shell.Shell] ERROR: java.lang.IllegalArgumentException: org.apache.accumulo.test.FooFilter
+root@test15 nofootwo&gt; config -t nofootwo -s table.classpath.context=cx1
+root@test15 nofootwo&gt; setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
+Filter accepts or rejects each Key/Value pair
+----------&gt; set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/classpath.md b/1.9/examples/classpath.md
deleted file mode 100644
index 710560f..0000000
--- a/1.9/examples/classpath.md
+++ /dev/null
@@ -1,68 +0,0 @@
-Title: Apache Accumulo Classpath Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-This example shows how to use per table classpaths. The example leverages a
-test jar which contains a Filter that supresses rows containing "foo". The
-example shows copying the FooFilter.jar into HDFS and then making an Accumulo
-table reference that jar.
-
-
-Execute the following command in the shell.
-
-    $ hadoop fs -copyFromLocal $ACCUMULO_HOME/test/src/test/resources/FooFilter.jar /user1/lib
-
-Execute following in Accumulo shell to setup classpath context
-
-    root@test15> config -s general.vfs.context.classpath.cx1=hdfs://<namenode host>:<namenode port>/user1/lib/[^.].*.jar
-
-Create a table
-
-    root@test15> createtable nofoo
-
-The following command makes this table use the configured classpath context
-
-    root@test15 nofoo> config -t nofoo -s table.classpath.context=cx1
-
-The following command configures an iterator thats in FooFilter.jar
-
-    root@test15 nofoo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-The commands below show the filter is working.
-
-    root@test15 nofoo> insert foo1 f1 q1 v1
-    root@test15 nofoo> insert noo1 f1 q1 v2
-    root@test15 nofoo> scan
-    noo1 f1:q1 []    v2
-    root@test15 nofoo>
-
-Below, an attempt is made to add the FooFilter to a table thats not configured
-to use the clasppath context cx1. This fails util the table is configured to
-use cx1.
-
-    root@test15 nofoo> createtable nofootwo
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    2013-05-03 12:49:35,943 [shell.Shell] ERROR: java.lang.IllegalArgumentException: org.apache.accumulo.test.FooFilter
-    root@test15 nofootwo> config -t nofootwo -s table.classpath.context=cx1
-    root@test15 nofootwo> setiter -n foofilter -p 10 -scan -minc -majc -class org.apache.accumulo.test.FooFilter
-    Filter accepts or rejects each Key/Value pair
-    ----------> set FooFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method: false
-
-
diff --git a/1.9/examples/index.html b/1.9/examples/client.html
similarity index 56%
copy from 1.9/examples/index.html
copy to 1.9/examples/client.html
index 3af64b6..767022b 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/client.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Client Examples</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,73 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Client Examples</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This documents how to run the simplest Java examples.</p>
+
+<p>This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:</p>
+
+<ul>
+  <li>Flush.java - flushes a table</li>
+  <li>RowOperations.java - reads and writes rows</li>
+  <li>ReadWriteExample.java - creates a table, writes to it, and reads from it</li>
+</ul>
+
+<p>Using the accumulo command, you can run the simple client examples by providing their
+class name and enough arguments to find your Accumulo instance. For example,
+the Flush class will flush a table:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ PACKAGE=org.apache.accumulo.examples.simple.client
+$ bin/accumulo $PACKAGE.Flush -u root -p mypassword -i instance -z zookeeper -t trace
+</code></pre></div></div>
+
+<p>The very simple RowOperations class demonstrates how to read and write rows using the BatchWriter
+and Scanner:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/accumulo $PACKAGE.RowOperations -u root -p mypassword -i instance -z zookeeper
+2013-01-14 14:45:24,738 [client.RowOperations] INFO : This is everything
+2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
+2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:1 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:4 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,756 [client.RowOperations] INFO : This is row1 and row3
+2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
+2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,765 [client.RowOperations] INFO : This is just row3
+2013-01-14 14:45:24,769 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
+2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
+</code></pre></div></div>
+
+<p>To create a table, write to it, and read from it:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/accumulo $PACKAGE.ReadWriteExample -u root -p mypassword -i instance -z zookeeper --createtable --create --read
+hello%00; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%01; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%02; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%03; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%04; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%05; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%06; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%07; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%08; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+hello%09; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -&gt; world
+</code></pre></div></div>
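+
+<p>The heart of these examples is just the BatchWriter and Scanner APIs. Below is a minimal,
+self-contained sketch of that pattern; the table name and cell values are made up for
+illustration and are not taken from ReadWriteExample itself.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+
+public class ReadWriteSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeeper")
+        .getConnector("root", new PasswordToken("mypassword"));
+    if (!conn.tableOperations().exists("sketchtable"))
+      conn.tableOperations().create("sketchtable");
+
+    // Write one cell with the BatchWriter.
+    BatchWriter bw = conn.createBatchWriter("sketchtable", new BatchWriterConfig());
+    Mutation m = new Mutation("row1");
+    m.put("colf", "colq", new Value("world".getBytes()));
+    bw.addMutation(m);
+    bw.close();
+
+    // Read it back with a Scanner.
+    Scanner scanner = conn.createScanner("sketchtable", Authorizations.EMPTY);
+    for (Entry&lt;Key,Value&gt; e : scanner)
+      System.out.println(e.getKey() + " -&gt; " + e.getValue());
+  }
+}
+</code></pre></div></div>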
 
 
         </div>
diff --git a/1.9/examples/client.md b/1.9/examples/client.md
deleted file mode 100644
index f6b8bcb..0000000
--- a/1.9/examples/client.md
+++ /dev/null
@@ -1,79 +0,0 @@
-Title: Apache Accumulo Client Examples
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This documents how you run the simplest java examples.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.client in the examples-simple module:
-
- * Flush.java - flushes a table
- * RowOperations.java - reads and writes rows
- * ReadWriteExample.java - creates a table, writes to it, and reads from it
-
-Using the accumulo command, you can run the simple client examples by providing their
-class name, and enough arguments to find your accumulo instance. For example,
-the Flush class will flush a table:
-
-    $ PACKAGE=org.apache.accumulo.examples.simple.client
-    $ bin/accumulo $PACKAGE.Flush -u root -p mypassword -i instance -z zookeeper -t trace
-
-The very simple RowOperations class demonstrates how to read and write rows using the BatchWriter
-and Scanner:
-
-    $ bin/accumulo $PACKAGE.RowOperations -u root -p mypassword -i instance -z zookeeper
-    2013-01-14 14:45:24,738 [client.RowOperations] INFO : This is everything
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,744 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,746 [client.RowOperations] INFO : Key: row2 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,747 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,756 [client.RowOperations] INFO : This is row1 and row3
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:1 [] 1358192724640 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,757 [client.RowOperations] INFO : Key: row1 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,761 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,765 [client.RowOperations] INFO : This is just row3
-    2013-01-14 14:45:24,769 [client.RowOperations] INFO : Key: row3 column:1 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:2 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:3 [] 1358192724642 false Value: This is the value for this key
-    2013-01-14 14:45:24,770 [client.RowOperations] INFO : Key: row3 column:4 [] 1358192724642 false Value: This is the value for this key
-
-To create a table, write to it and read from it:
-
-    $ bin/accumulo $PACKAGE.ReadWriteExample -u root -p mypassword -i instance -z zookeeper --createtable --create --read
-    hello%00; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%01; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%02; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%03; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%04; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%05; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%06; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%07; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%08; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-    hello%09; datatypes:xml [LEVEL1|GROUP1] 1358192329450 false -> world
-
diff --git a/1.8/examples/index.html b/1.9/examples/combiner.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/combiner.html
index 3af64b6..044dd96 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/combiner.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Combiner Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,63 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Combiner Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>This tutorial uses the following Java class, which can be found in org.apache.accumulo.examples.simple.combiner in the examples-simple module:</p>
+
+<ul>
+  <li>StatsCombiner.java - a combiner that calculates max, min, sum, and count</li>
+</ul>
+
+<p>This is a simple combiner example. To build this example, run Maven and then
+copy the produced jar into the Accumulo lib directory. This step has already
+been done in the tar distribution.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/accumulo shell -u username
+Enter current password for 'username'@'instance': ***
+
+Shell - Apache Accumulo Interactive Shell
+<span class="gd">-
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+</span>username@instance&gt; createtable runners
+username@instance runners&gt; setiter -t runners -p 10 -scan -minc -majc -n decStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
+Combiner that keeps track of min, max, sum, and count
+<span class="gd">----------&gt; set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
+----------&gt; set StatsCombiner parameter columns, &lt;col fam&gt;[:&lt;col qual&gt;]{,&lt;col fam&gt;[:&lt;col qual&gt;]} escape non aplhanum chars using %&lt;hex&gt;.: stat
+----------&gt; set StatsCombiner parameter radix, radix/base of the numbers: 10
+</span>username@instance runners&gt; setiter -t runners -p 11 -scan -minc -majc -n hexStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
+Combiner that keeps track of min, max, sum, and count
+<span class="gd">----------&gt; set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
+----------&gt; set StatsCombiner parameter columns, &lt;col fam&gt;[:&lt;col qual&gt;]{,&lt;col fam&gt;[:&lt;col qual&gt;]} escape non aplhanum chars using %&lt;hex&gt;.: hstat
+----------&gt; set StatsCombiner parameter radix, radix/base of the numbers: 16
+</span>username@instance runners&gt; insert 123456 name first Joe
+username@instance runners&gt; insert 123456 stat marathon 240
+username@instance runners&gt; scan
+123456 name:first []    Joe
+123456 stat:marathon []    240,240,240,1
+username@instance runners&gt; insert 123456 stat marathon 230
+username@instance runners&gt; insert 123456 stat marathon 220
+username@instance runners&gt; scan
+123456 name:first []    Joe
+123456 stat:marathon []    220,240,690,3
+username@instance runners&gt; insert 123456 hstat virtualMarathon 6a
+username@instance runners&gt; insert 123456 hstat virtualMarathon 6b
+username@instance runners&gt; scan
+123456 hstat:virtualMarathon []    6a,6b,d5,2
+123456 name:first []    Joe
+123456 stat:marathon []    220,240,690,3
+</code></pre></div></div>
+
+<p>In this example, a table is created and the example stats combiner is applied
+to the column families stat and hstat. The stats combiner computes min, max,
+sum, and count. It can be configured to use a different base or radix. In the
+example above, the column family stat is configured for base 10 and the column
+family hstat is configured for base 16.</p>
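+
+<p>For reference, the same iterator configuration can be applied through the
+Java client API instead of the shell. The sketch below is not part of the
+example code; the class name is hypothetical, and the instance, zookeepers,
+username, and password values are the usual placeholders.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+
+public class AttachStatsCombiner {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeepers")
+        .getConnector("username", new PasswordToken("password"));
+    conn.tableOperations().create("runners");
+    // Same options as the decStats iterator configured in the shell session above.
+    IteratorSetting setting = new IteratorSetting(10, "decStats",
+        "org.apache.accumulo.examples.simple.combiner.StatsCombiner");
+    setting.addOption("columns", "stat");
+    setting.addOption("radix", "10");
+    conn.tableOperations().attachIterator("runners", setting);
+  }
+}
+</code></pre></div></div>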
 
         </div>
 
diff --git a/1.9/examples/combiner.md b/1.9/examples/combiner.md
deleted file mode 100644
index f388e5b..0000000
--- a/1.9/examples/combiner.md
+++ /dev/null
@@ -1,70 +0,0 @@
-Title: Apache Accumulo Combiner Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java class, which can be found in org.apache.accumulo.examples.simple.combiner in the examples-simple module:
-
- * StatsCombiner.java - a combiner that calculates max, min, sum, and count
-
-This is a simple combiner example. To build this example, run Maven and then
-copy the produced jar into the Accumulo lib directory. This step has already
-been done in the tar distribution.
-
-    $ bin/accumulo shell -u username
-    Enter current password for 'username'@'instance': ***
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable runners
-    username@instance runners> setiter -t runners -p 10 -scan -minc -majc -n decStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: stat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 10
-    username@instance runners> setiter -t runners -p 11 -scan -minc -majc -n hexStats -class org.apache.accumulo.examples.simple.combiner.StatsCombiner
-    Combiner that keeps track of min, max, sum, and count
-    ----------> set StatsCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.:
-    ----------> set StatsCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non aplhanum chars using %<hex>.: hstat
-    ----------> set StatsCombiner parameter radix, radix/base of the numbers: 16
-    username@instance runners> insert 123456 name first Joe
-    username@instance runners> insert 123456 stat marathon 240
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    240,240,240,1
-    username@instance runners> insert 123456 stat marathon 230
-    username@instance runners> insert 123456 stat marathon 220
-    username@instance runners> scan
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-    username@instance runners> insert 123456 hstat virtualMarathon 6a
-    username@instance runners> insert 123456 hstat virtualMarathon 6b
-    username@instance runners> scan
-    123456 hstat:virtualMarathon []    6a,6b,d5,2
-    123456 name:first []    Joe
-    123456 stat:marathon []    220,240,690,3
-
-In this example, a table is created and the example stats combiner is applied
-to the column families stat and hstat. The stats combiner computes min, max,
-sum, and count. It can be configured to use a different base or radix. In the
-example above, the column family stat is configured for base 10 and the column
-family hstat is configured for base 16.
diff --git a/1.8/examples/index.html b/1.9/examples/constraints.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/constraints.html
index 3af64b6..51cd067 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/constraints.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Constraints Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,46 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Constraints Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.constraints in the examples-simple module:</p>
+
+<ul>
+  <li>AlphaNumKeyConstraint.java - a constraint that requires alphanumeric keys</li>
+  <li>NumericValueConstraint.java - a constraint that requires numeric string values</li>
+</ul>
+
+<p>This is an example of how to create a table with constraints. Below, a table
+is created with two example constraints. One constraint does not allow
+non-alphanumeric keys. The other constraint does not allow non-numeric values.
+Two inserts that violate these constraints are attempted and denied. The scan
+at the end shows the inserts were not allowed.</p>
+
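+<p>The same constraints can also be attached through the Java client API. This
+is a minimal sketch, not part of the example code; it assumes an existing
+Connector named “conn”.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Assumes conn is an org.apache.accumulo.core.client.Connector.
+conn.tableOperations().create("testConstraints");
+conn.tableOperations().addConstraint("testConstraints",
+    "org.apache.accumulo.examples.simple.constraints.NumericValueConstraint");
+conn.tableOperations().addConstraint("testConstraints",
+    "org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint");
+</code></pre></div></div>
+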
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+
+Shell - Apache Accumulo Interactive Shell
+-
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; createtable testConstraints
+username@instance testConstraints&gt; constraint -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint
+username@instance testConstraints&gt; constraint -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint
+username@instance testConstraints&gt; insert r1 cf1 cq1 1111
+username@instance testConstraints&gt; insert r1 cf1 cq1 ABC
+  Constraint Failures:
+      ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
+username@instance testConstraints&gt; insert r1! cf1 cq1 ABC
+  Constraint Failures:
+      ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
+      ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint, violationCode:1, violationDescription:Row was not alpha numeric, numberOfViolatingMutations:1)
+username@instance testConstraints&gt; scan
+r1 cf1:cq1 []    1111
+username@instance testConstraints&gt;
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/constraints.md b/1.9/examples/constraints.md
deleted file mode 100644
index b15b409..0000000
--- a/1.9/examples/constraints.md
+++ /dev/null
@@ -1,54 +0,0 @@
-Title: Apache Accumulo Constraints Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.constraints in the examples-simple module:
-
- * AlphaNumKeyConstraint.java - a constraint that requires alphanumeric keys
- * NumericValueConstraint.java - a constraint that requires numeric string values
-
-This is an example of how to create a table with constraints. Below, a table
-is created with two example constraints. One constraint does not allow
-non-alphanumeric keys. The other constraint does not allow non-numeric values.
-Two inserts that violate these constraints are attempted and denied. The scan
-at the end shows the inserts were not allowed.
-
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable testConstraints
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint
-    username@instance testConstraints> constraint -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint
-    username@instance testConstraints> insert r1 cf1 cq1 1111
-    username@instance testConstraints> insert r1 cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> insert r1! cf1 cq1 ABC
-      Constraint Failures:
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.NumericValueConstraint, violationCode:1, violationDescription:Value is not numeric, numberOfViolatingMutations:1)
-          ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint, violationCode:1, violationDescription:Row was not alpha numeric, numberOfViolatingMutations:1)
-    username@instance testConstraints> scan
-    r1 cf1:cq1 []    1111
-    username@instance testConstraints>
-
diff --git a/1.9/examples/dirlist.html b/1.9/examples/dirlist.html
new file mode 100644
index 0000000..1e910af
--- /dev/null
+++ b/1.9/examples/dirlist.html
@@ -0,0 +1,279 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<meta charset="utf-8">
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link href="https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/paper/bootstrap.min.css" rel="stylesheet" integrity="sha384-awusxf8AUojygHf2+joICySzB780jVvQaVCAt1clU3QsyAitLGul28Qxb2r1e5g+" crossorigin="anonymous">
+<link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
+<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
+<link href="/css/accumulo.css" rel="stylesheet" type="text/css">
+
+<title>Apache Accumulo File System Archive</title>
+
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
+<script type="text/javascript" src="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.js"></script>
+<script>
+  // show location of canonical site if not currently on the canonical site
+  $(function() {
+    var host = window.location.host;
+    if (typeof host !== 'undefined' && host !== 'accumulo.apache.org') {
+      $('#non-canonical').show();
+    }
+  });
+
+  $(function() {
+    // decorate section headers with anchors
+    return $("h2, h3, h4, h5, h6").each(function(i, el) {
+      var $el, icon, id;
+      $el = $(el);
+      id = $el.attr('id');
+      icon = '<i class="fa fa-link"></i>';
+      if (id) {
+        return $el.append($("<a />").addClass("header-link").attr("href", "#" + id).html(icon));
+      }
+    });
+  });
+
+  // fix sidebar width in documentation
+  $(function() {
+    var $affixElement = $('div[data-spy="affix"]');
+    $affixElement.width($affixElement.parent().width());
+  });
+</script>
+
+</head>
+<body style="padding-top: 100px">
+
+  <nav class="navbar navbar-default navbar-fixed-top">
+  <div class="container">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#navbar-items">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a href="/"><img id="nav-logo" alt="Apache Accumulo" class="img-responsive" src="/images/accumulo-logo.png" width="200"
+        /></a>
+    </div>
+    <div class="collapse navbar-collapse" id="navbar-items">
+      <ul class="nav navbar-nav">
+        <li class="nav-link"><a href="/downloads">Download</a></li>
+        <li class="nav-link"><a href="/tour">Tour</a></li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Releases<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/release/accumulo-2.0.0-alpha-2/">2.0.0-alpha-2 (Preview Release)</a></li>
+            <li><a href="/release/accumulo-1.9.3/">1.9.3 (Latest)</a></li>
+            <li><a href="/release/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Documentation<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/docs/2.x/getting-started/quickstart">User Manual (2.x)</a></li>
+            <li><a href="/quickstart-1.x">Quickstart (1.x)</a></li>
+            <li><a href="/1.9/accumulo_user_manual.html">User Manual (1.9)</a></li>
+            <li><a href="/1.9/apidocs">Javadocs (1.9)</a></li>
+            <li><a href="/external-docs">External Docs</a></li>
+            <li><a href="/docs-archive/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Community<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/contact-us">Contact Us</a></li>
+            <li><a href="/how-to-contribute">How To Contribute</a></li>
+            <li><a href="/people">People</a></li>
+            <li><a href="/related-projects">Related Projects</a></li>
+          </ul>
+        </li>
+        <li class="nav-link"><a href="/search">Search</a></li>
+      </ul>
+      <ul class="nav navbar-nav navbar-right">
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#"><img alt="Apache Software Foundation" src="https://www.apache.org/foundation/press/kit/feather.svg" width="15"/><span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="https://www.apache.org">Apache Homepage <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/licenses/">License <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/sponsorship">Sponsorship <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/security">Security <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/thanks">Thanks <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/policies/conduct">Code of Conduct <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/events/current-event.html">Current Event <i class="fa fa-external-link"></i></a></li>
+          </ul>
+        </li>
+      </ul>
+    </div>
+  </div>
+</nav>
+
+
+  <div class="container">
+    <div class="row">
+      <div class="col-md-12">
+
+        <div id="non-canonical" style="display: none; background-color: #F0E68C; padding-left: 1em;">
+          Visit the official site at: <a href="https://accumulo.apache.org">https://accumulo.apache.org</a>
+        </div>
+        <div id="content">
+          
+          <h1 class="title">Apache Accumulo File System Archive</h1>
+          
+          <p>This example stores filesystem information in Accumulo, using the following three tables. More information about the table structures can be found at the end of this document.</p>
+
+<ul>
+  <li>directory table : This table stores information about the filesystem directory structure.</li>
+  <li>index table     : This table stores a file name index. It can be used to quickly find files with given name, suffix, or prefix.</li>
+  <li>data table      : This table stores the file data. Files with duplicate data are only stored once.</li>
+</ul>
+
+<p>This example shows how to use Accumulo to store a file system history. It has the following classes:</p>
+
+<ul>
+  <li>Ingest.java - Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a separate table, and the file data into a third table.</li>
+  <li>QueryUtil.java - Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory names.</li>
+  <li>Viewer.java - Provides a GUI for browsing the file system information stored in Accumulo.</li>
+  <li>FileCount.java - Computes recursive counts over file system information and stores them back into the same Accumulo table.</li>
+</ul>
+
+<p>To begin, ingest some data with Ingest.java.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest -i instance -z zookeepers -u username -p password --vis exampleVis --chunkSize 100000 /local/username/workspace
+</code></pre></div></div>
+
+<p>This may take some time if there are large files in the /local/username/workspace directory. If you use 0 instead of 100000 on the command line, the ingest will run much faster, but it will not put any file data into Accumulo (the dataTable will be empty).
+Note that running this example will create tables dirTable, indexTable, and dataTable in Accumulo that you should delete when you have completed the example.
+If you modify a file or add new files in the directory ingested (e.g. /local/username/workspace), you can run Ingest again to add new information into the Accumulo tables.</p>
+
+<p>To browse the data ingested, use Viewer.java. Be sure to give the “username” user the authorizations to see the data. In this case, run</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u root -e 'setauths -u username -s exampleVis'
+</code></pre></div></div>
+
+<p>then run the Viewer:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer -i instance -z zookeepers -u username -p password -t dirTable --dataTable dataTable --auths exampleVis --path /local/username/workspace
+</code></pre></div></div>
+
+<p>To list the contents of specific directories, use QueryUtil.java.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username
+$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username/workspace
+</code></pre></div></div>
+
+<p>To perform searches on file or directory names, also use QueryUtil.java. Search terms must contain no more than one wild card and cannot contain “/”.
+<em>Note</em> these queries run on the <em>indexTable</em> table instead of the dirTable table.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path filename --search
+$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*' --search
+$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path '*jar' --search
+$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*jar' --search
+</code></pre></div></div>
+
+<p>To count the number of direct children (directories and files) and descendants (children and children’s descendants, directories and files), run the FileCount over the dirTable table.
+The results are written back to the same table. FileCount reads from and writes to Accumulo. This requires scan authorizations for the read and a visibility for the data written.
+In this example, the authorizations and visibility are set to the same value, exampleVis. See README.visibility for more information on visibility and authorizations.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.FileCount -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis
+</code></pre></div></div>
+
+<h2 id="directory-table">Directory Table</h2>
+
+<p>Here is an illustration of what data looks like in the directory table:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>row colf:colq [vis]	value
+000 dir:exec [exampleVis]    true
+000 dir:hidden [exampleVis]    false
+000 dir:lastmod [exampleVis]    1291996886000
+000 dir:length [exampleVis]    1666
+001/local dir:exec [exampleVis]    true
+001/local dir:hidden [exampleVis]    false
+001/local dir:lastmod [exampleVis]    1304945270000
+001/local dir:length [exampleVis]    272
+002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:exec [exampleVis]    false
+002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:hidden [exampleVis]    false
+002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:lastmod [exampleVis]    1308746481000
+002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:length [exampleVis]    9192
+002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:md5 [exampleVis]    274af6419a3c4c4a259260ac7017cbf1
+</code></pre></div></div>
+
+<p>The rows are of the form depth + path, where depth is the number of slashes (“/”) in the path, padded to 3 digits. This is so that all the children of a directory appear as consecutive keys in Accumulo; without the depth, you would, for example, see all the subdirectories of /local before you saw /usr.
+For directories the column family is “dir”. For files the column family is Long.MAX_VALUE - lastModified, encoded as bytes rather than as a string so that newer versions sort earlier.</p>
+
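+<p>As a hedged illustration (not the exact code from Ingest.java), a row key of
+this form could be computed like this:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Builds the depth-padded row key described above, e.g. "002/local/Accumulo.README".
+static String rowKey(String path) {
+  int depth = 0;
+  for (int i = 0; i &lt; path.length(); i++) {
+    if (path.charAt(i) == '/') {
+      depth++;
+    }
+  }
+  return String.format("%03d", depth) + path;
+}
+</code></pre></div></div>
+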
+<h2 id="index-table">Index Table</h2>
+
+<p>Here is an illustration of what data looks like in the index table:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>row colf:colq [vis]
+fAccumulo.README i:002/local/Accumulo.README [exampleVis]
+flocal i:001/local [exampleVis]
+rEMDAER.olumuccA i:002/local/Accumulo.README [exampleVis]
+rlacol i:001/local [exampleVis]
+</code></pre></div></div>
+
+<p>The values of the index table are null. The rows are of the form “f” + filename or “r” + reverse file name. This is to enable searches with wildcards at the beginning, middle, or end.</p>
+
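+<p>A hedged sketch (again, not the exact example code) of how the two index rows
+for a file name could be formed:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>String name = "Accumulo.README";
+String forward = "f" + name;                               // fAccumulo.README
+String reverse = "r" + new StringBuilder(name).reverse();  // rEMDAER.olumuccA
+</code></pre></div></div>
+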
+<h2 id="data-table">Data Table</h2>
+
+<p>Here is an illustration of what data looks like in the data table:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>row colf:colq [vis]	value
+274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00filext [exampleVis]    README
+274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00name [exampleVis]    /local/Accumulo.README
+274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x00 [exampleVis]    *******************************************************************************\x0A1. Building\x0A\x0AIn the normal tarball release of accumulo, [truncated]
+274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x01 [exampleVis]
+</code></pre></div></div>
+
+<p>The rows are the md5 hash of the file. Some column family : column qualifier pairs are “refs” : hash of file name + null byte + property name, in which case the value is the property value. There can be multiple references to the same file, which are distinguished by the hash of the file name.
+Other column family : column qualifier pairs are “~chunk” : chunk size in bytes + chunk number in bytes, in which case the value is the bytes for that chunk of the file. There is an end-of-file marker whose chunk number is the number of chunks for the file and whose value is empty.</p>
+
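+<p>A hedged sketch of the “~chunk” column qualifier layout described above,
+assuming 4-byte big-endian integers for both fields (the example’s actual
+encoding may differ in detail):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// 4-byte chunk size followed by 4-byte chunk number, e.g.
+// chunkSize=1000000, chunkNumber=0 -&gt; \x00\x0FB@\x00\x00\x00\x00
+static byte[] chunkQualifier(int chunkSize, int chunkNumber) {
+  return java.nio.ByteBuffer.allocate(8).putInt(chunkSize).putInt(chunkNumber).array();
+}
+</code></pre></div></div>
+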
+<p>There may exist multiple copies of the same file (with the same md5 hash) with different chunk sizes or different visibilities. There is an iterator that can be set on the data table that combines these copies into a single copy with a visibility taken from the visibilities of the file references, e.g. (vis from ref1)|(vis from ref2).</p>
+
+        </div>
+
+        
+<footer>
+
+  <p><a href="https://www.apache.org/foundation/contributing"><img src="https://www.apache.org/images/SupportApache-small.png" alt="Support the ASF" id="asf-logo" height="100" /></a></p>
+
+  <p>Copyright © 2011-2019 <a href="https://www.apache.org">The Apache Software Foundation</a>.
+Licensed under the <a href="https://www.apache.org/licenses/">Apache License, Version 2.0</a>.</p>
+
+  <p>Apache®, the names of Apache projects and their logos, and the multicolor feather
+logo are registered trademarks or trademarks of The Apache Software Foundation
+in the United States and/or other countries.</p>
+
+</footer>
+
+
+      </div>
+    </div>
+  </div>
+</body>
+</html>
diff --git a/1.9/examples/dirlist.md b/1.9/examples/dirlist.md
deleted file mode 100644
index 50623c6..0000000
--- a/1.9/examples/dirlist.md
+++ /dev/null
@@ -1,114 +0,0 @@
-Title: Apache Accumulo File System Archive
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example stores filesystem information in Accumulo, using the following three tables. More information about the table structures can be found at the end of this document.
-
- * directory table : This table stores information about the filesystem directory structure.
- * index table     : This table stores a file name index. It can be used to quickly find files with given name, suffix, or prefix.
- * data table      : This table stores the file data. Files with duplicate data are only stored once.
-
-This example shows how to use Accumulo to store a file system history. It has the following classes:
-
- * Ingest.java - Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a separate table, and the file data into a third table.
- * QueryUtil.java - Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory names.
- * Viewer.java - Provides a GUI for browsing the file system information stored in Accumulo.
- * FileCount.java - Computes recursive counts over file system information and stores them back into the same Accumulo table.
-
-To begin, ingest some data with Ingest.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Ingest -i instance -z zookeepers -u username -p password --vis exampleVis --chunkSize 100000 /local/username/workspace
-
-This may take some time if there are large files in the /local/username/workspace directory. If you use 0 instead of 100000 on the command line, the ingest will run much faster, but it will not put any file data into Accumulo (the dataTable will be empty).
-Note that running this example will create tables dirTable, indexTable, and dataTable in Accumulo that you should delete when you have completed the example.
-If you modify a file or add new files in the directory ingested (e.g. /local/username/workspace), you can run Ingest again to add new information into the Accumulo tables.
-
-To browse the data ingested, use Viewer.java. Be sure to give the "username" user the authorizations to see the data. In this case, run
-
-    $ ./bin/accumulo shell -u root -e 'setauths -u username -s exampleVis'
-
-then run the Viewer:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.Viewer -i instance -z zookeepers -u username -p password -t dirTable --dataTable dataTable --auths exampleVis --path /local/username/workspace
-
-To list the contents of specific directories, use QueryUtil.java.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis --path /local/username/workspace
-
-To perform searches on file or directory names, also use QueryUtil.java. Search terms must contain no more than one wild card and cannot contain "/".
-*Note* these queries run on the _indexTable_ table instead of the dirTable table.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path filename --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path '*jar' --search
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.QueryUtil -i instance -z zookeepers -u username -p password -t indexTable --auths exampleVis --path 'filename*jar' --search
-
-To count the number of direct children (directories and files) and descendants (children and children's descendants, directories and files), run the FileCount over the dirTable table.
-The results are written back to the same table. FileCount reads from and writes to Accumulo. This requires scan authorizations for the read and a visibility for the data written.
-In this example, the authorizations and visibility are set to the same value, exampleVis. See README.visibility for more information on visibility and authorizations.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.dirlist.FileCount -i instance -z zookeepers -u username -p password -t dirTable --auths exampleVis
-
-## Directory Table
-
-Here is an illustration of what data looks like in the directory table:
-
-    row colf:colq [vis]	value
-    000 dir:exec [exampleVis]    true
-    000 dir:hidden [exampleVis]    false
-    000 dir:lastmod [exampleVis]    1291996886000
-    000 dir:length [exampleVis]    1666
-    001/local dir:exec [exampleVis]    true
-    001/local dir:hidden [exampleVis]    false
-    001/local dir:lastmod [exampleVis]    1304945270000
-    001/local dir:length [exampleVis]    272
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:exec [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:hidden [exampleVis]    false
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:lastmod [exampleVis]    1308746481000
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:length [exampleVis]    9192
-    002/local/Accumulo.README \x7F\xFF\xFE\xCFH\xA1\x82\x97:md5 [exampleVis]    274af6419a3c4c4a259260ac7017cbf1
-
-The rows are of the form depth + path, where depth is the number of slashes ("/") in the path, padded to 3 digits. This is so that all the children of a directory appear as consecutive keys in Accumulo; without the depth, you would, for example, see all the subdirectories of /local before you saw /usr.
-For directories the column family is "dir". For files the column family is Long.MAX_VALUE - lastModified, encoded as bytes rather than as a string so that newer versions sort earlier.
-
-## Index Table
-
-Here is an illustration of what data looks like in the index table:
-
-    row colf:colq [vis]
-    fAccumulo.README i:002/local/Accumulo.README [exampleVis]
-    flocal i:001/local [exampleVis]
-    rEMDAER.olumuccA i:002/local/Accumulo.README [exampleVis]
-    rlacol i:001/local [exampleVis]
-
-The values of the index table are null. The rows are of the form "f" + filename or "r" + reverse file name. This is to enable searches with wildcards at the beginning, middle, or end.
-
-## Data Table
-
-Here is an illustration of what data looks like in the data table:
-
-    row colf:colq [vis]	value
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00filext [exampleVis]    README
-    274af6419a3c4c4a259260ac7017cbf1 refs:e77276a2b56e5c15b540eaae32b12c69\x00name [exampleVis]    /local/Accumulo.README
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x00 [exampleVis]    *******************************************************************************\x0A1. Building\x0A\x0AIn the normal tarball release of accumulo, [truncated]
-    274af6419a3c4c4a259260ac7017cbf1 ~chunk:\x00\x0FB@\x00\x00\x00\x01 [exampleVis]
-
-The rows are the md5 hash of the file. Some column family : column qualifier pairs are "refs" : hash of file name + null byte + property name, in which case the value is the property value. There can be multiple references to the same file, which are distinguished by the hash of the file name.
-Other column family : column qualifier pairs are "~chunk" : chunk size in bytes + chunk number in bytes, in which case the value is the bytes for that chunk of the file. There is an end-of-file marker whose chunk number is the number of chunks for the file and whose value is empty.
-
-There may exist multiple copies of the same file (with the same md5 hash) with different chunk sizes or different visibilities. There is an iterator that can be set on the data table that combines these copies into a single copy with a visibility taken from the visibilities of the file references, e.g. (vis from ref1)|(vis from ref2).
diff --git a/1.8/examples/index.html b/1.9/examples/export.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/export.html
index 3af64b6..d687b92 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/export.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Export/Import Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,83 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Export/Import Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>Accumulo provides a mechanism to export and import tables. This README shows
+how to use this feature.</p>
+
+<p>The shell session below shows creating a table, inserting data, and exporting
+the table. A table must be offline to export it, and it should remain offline
+for the duration of the distcp. An easy way to take a table offline without
+interrupting access to it is to clone it and take the clone offline.</p>
+
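+<p>The clone/offline/export sequence can also be driven from the Java client
+API. This is a hedged sketch assuming an existing Connector named “conn”; the
+null arguments mean no table properties are set or excluded on the clone.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Clone, take the clone offline (waiting for it), then export it.
+conn.tableOperations().clone("table1", "table1_exp", true, null, null);
+conn.tableOperations().offline("table1_exp", true);
+conn.tableOperations().exportTable("table1_exp", "/tmp/table1_export");
+</code></pre></div></div>
+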
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15&gt; createtable table1
+root@test15 table1&gt; insert a cf1 cq1 v1
+root@test15 table1&gt; insert h cf1 cq1 v2
+root@test15 table1&gt; insert z cf1 cq1 v3
+root@test15 table1&gt; insert z cf1 cq2 v4
+root@test15 table1&gt; addsplits -t table1 b r
+root@test15 table1&gt; scan
+a cf1:cq1 []    v1
+h cf1:cq1 []    v2
+z cf1:cq1 []    v3
+z cf1:cq2 []    v4
+root@test15&gt; config -t table1 -s table.split.threshold=100M
+root@test15 table1&gt; clonetable table1 table1_exp
+root@test15 table1&gt; offline table1_exp
+root@test15 table1&gt; exporttable -t table1_exp /tmp/table1_export
+root@test15 table1&gt; quit
+</code></pre></div></div>
+
+<p>After executing the export command, a few files are created in the HDFS directory.
+One of these files is a list of files to distcp, as shown below.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -ls /tmp/table1_export
+Found 2 items
+-rw-r--r--   3 user supergroup        162 2012-07-25 09:56 /tmp/table1_export/distcp.txt
+-rw-r--r--   3 user supergroup        821 2012-07-25 09:56 /tmp/table1_export/exportMetadata.zip
+$ hadoop fs -cat /tmp/table1_export/distcp.txt
+hdfs://n1.example.com:6093/accumulo/tables/3/default_tablet/F0000000.rf
+hdfs://n1.example.com:6093/tmp/table1_export/exportMetadata.zip
+</code></pre></div></div>
+
+<p>Before the table can be imported, it must be copied using distcp. After the
+distcp completes, the cloned table may be deleted.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop distcp -f /tmp/table1_export/distcp.txt /tmp/table1_export_dest
+</code></pre></div></div>
+
+<p>The Accumulo shell session below shows importing the table and inspecting it.
+The data, splits, config, and logical time information for the table were
+preserved.</p>
+
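+<p>Equivalently, once the distcp has finished, the import can be performed
+through the Java client API (a hedged sketch, again assuming “conn”):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>conn.tableOperations().importTable("table1_copy", "/tmp/table1_export_dest");
+</code></pre></div></div>
+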
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test15&gt; importtable table1_copy /tmp/table1_export_dest
+root@test15&gt; table table1_copy
+root@test15 table1_copy&gt; scan
+a cf1:cq1 []    v1
+h cf1:cq1 []    v2
+z cf1:cq1 []    v3
+z cf1:cq2 []    v4
+root@test15 table1_copy&gt; getsplits -t table1_copy
+b
+r
+root@test15&gt; config -t table1_copy -f split
+---------+--------------------------+-------------------------------------------
+SCOPE    | NAME                     | VALUE
+---------+--------------------------+-------------------------------------------
+default  | table.split.threshold .. | 1G
+table    |    @override ........... | 100M
+---------+--------------------------+-------------------------------------------
+root@test15&gt; tables -l
+accumulo.metadata    =&gt;        !0
+accumulo.root        =&gt;        +r
+table1_copy          =&gt;         5
+trace                =&gt;         1
+root@test15 table1_copy&gt; scan -t accumulo.metadata -b 5 -c srv:time
+5;b srv:time []    M1343224500467
+5;r srv:time []    M1343224500467
+5&lt; srv:time []    M1343224500467
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/export.md b/1.9/examples/export.md
deleted file mode 100644
index b6ea8f8..0000000
--- a/1.9/examples/export.md
+++ /dev/null
@@ -1,91 +0,0 @@
-Title: Apache Accumulo Export/Import Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-Accumulo provides a mechanism to export and import tables. This README shows
-how to use this feature.
-
-The shell session below shows creating a table, inserting data, and exporting
-the table. A table must be offline to export it, and it should remain offline
-for the duration of the distcp. An easy way to take a table offline without
-interrupting access to it is to clone it and take the clone offline.
-
-    root@test15> createtable table1
-    root@test15 table1> insert a cf1 cq1 v1
-    root@test15 table1> insert h cf1 cq1 v2
-    root@test15 table1> insert z cf1 cq1 v3
-    root@test15 table1> insert z cf1 cq2 v4
-    root@test15 table1> addsplits -t table1 b r
-    root@test15 table1> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15> config -t table1 -s table.split.threshold=100M
-    root@test15 table1> clonetable table1 table1_exp
-    root@test15 table1> offline table1_exp
-    root@test15 table1> exporttable -t table1_exp /tmp/table1_export
-    root@test15 table1> quit
-
-After executing the export command, a few files are created in the hdfs dir.
-One of the files is a list of files to distcp as shown below.
-
-    $ hadoop fs -ls /tmp/table1_export
-    Found 2 items
-    -rw-r--r--   3 user supergroup        162 2012-07-25 09:56 /tmp/table1_export/distcp.txt
-    -rw-r--r--   3 user supergroup        821 2012-07-25 09:56 /tmp/table1_export/exportMetadata.zip
-    $ hadoop fs -cat /tmp/table1_export/distcp.txt
-    hdfs://n1.example.com:6093/accumulo/tables/3/default_tablet/F0000000.rf
-    hdfs://n1.example.com:6093/tmp/table1_export/exportMetadata.zip
-
-Before the table can be imported, it must be copied using distcp. After the
-distcp completes, the cloned table may be deleted.
-
-    $ hadoop distcp -f /tmp/table1_export/distcp.txt /tmp/table1_export_dest
-
-The Accumulo shell session below shows importing the table and inspecting it.
-The data, splits, config, and logical time information for the table were
-preserved.
-
-    root@test15> importtable table1_copy /tmp/table1_export_dest
-    root@test15> table table1_copy
-    root@test15 table1_copy> scan
-    a cf1:cq1 []    v1
-    h cf1:cq1 []    v2
-    z cf1:cq1 []    v3
-    z cf1:cq2 []    v4
-    root@test15 table1_copy> getsplits -t table1_copy
-    b
-    r
-    root@test15> config -t table1_copy -f split
-    ---------+--------------------------+-------------------------------------------
-    SCOPE    | NAME                     | VALUE
-    ---------+--------------------------+-------------------------------------------
-    default  | table.split.threshold .. | 1G
-    table    |    @override ........... | 100M
-    ---------+--------------------------+-------------------------------------------
-    root@test15> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.root        =>        +r
-    table1_copy          =>         5
-    trace                =>         1
-    root@test15 table1_copy> scan -t accumulo.metadata -b 5 -c srv:time
-    5;b srv:time []    M1343224500467
-    5;r srv:time []    M1343224500467
-    5< srv:time []    M1343224500467
-
-
diff --git a/1.8/examples/index.html b/1.9/examples/filedata.html
similarity index 67%
copy from 1.8/examples/index.html
copy to 1.9/examples/filedata.html
index 3af64b6..732c446 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/filedata.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo File System Archive Example (Data Only)</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,53 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo File System Archive Example (Data Only)</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
+          <p>This example archives file data into an Accumulo table. Files with duplicate data are only stored once.
+The example has the following classes:</p>
+
+<ul>
+  <li>CharacterHistogram - A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. An example use of the ChunkInputFormat.</li>
+  <li>ChunkCombiner - An Iterator that dedupes file data and sets their visibilities to a combined visibility based on current references to the file data.</li>
+  <li>ChunkInputFormat - An Accumulo InputFormat that provides keys containing file info (List&lt;Entry&lt;Key,Value&gt;&gt;) and values with an InputStream over the file (ChunkInputStream).</li>
+  <li>ChunkInputStream - An input stream over file data stored in Accumulo.</li>
+  <li>FileDataIngest - Takes a list of files and archives them into Accumulo keyed on hashes of the files.</li>
+  <li>FileDataQuery - Retrieves file data based on the hash of the file. (Used by the dirlist.Viewer.)</li>
+  <li>KeyUtil - A utility for creating and parsing null-byte separated strings into/from Text objects.</li>
   <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
+    VisibilityCombiner - A utility for merging visibilities into the form (VIS1)|(VIS2)|…
   </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
+</ul>
 
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
+<p>This example is coupled with the dirlist example. See README.dirlist for instructions.</p>
 
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
+<p>If you haven’t already run the README.dirlist example, ingest a file with FileDataIngest.</p>
 
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --chunk 1000 $ACCUMULO_HOME/README
+</code></pre></div></div>
 
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
+<p>Open the accumulo shell and look at the data. The row is the MD5 hash of the file, which you can verify by running a command such as ‘md5sum’ on the file.</p>
 
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>&gt; scan -t dataTable
+</code></pre></div></div>
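+
+<p>If you would rather verify the row ID programmatically than with md5sum, a
+few lines of standard Java compute the same hex digest (a self-contained
+sketch using only the JDK; pass the file path as the first argument):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.security.MessageDigest;
+
+public class Md5OfFile {
+  public static void main(String[] args) throws Exception {
+    // Digest the whole file, then print lowercase hex like md5sum does.
+    byte[] digest = MessageDigest.getInstance("MD5")
+        .digest(Files.readAllBytes(Paths.get(args[0])));
+    StringBuilder hex = new StringBuilder();
+    for (byte b : digest)
+      hex.append(String.format("%02x", b));
+    System.out.println(hex);
+  }
+}
+</code></pre></div></div>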
 
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
+<p>Run the CharacterHistogram MapReduce to add some information about the file.</p>
 
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
+</code></pre></div></div>
 
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+<p>Scan again to see the histogram stored in the ‘info’ column family.</p>
 
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>&gt; scan -t dataTable
+</code></pre></div></div>
 
         </div>
 
diff --git a/1.9/examples/filedata.md b/1.9/examples/filedata.md
deleted file mode 100644
index 26a6c1e..0000000
--- a/1.9/examples/filedata.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Title: Apache Accumulo File System Archive Example (Data Only)
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example archives file data into an Accumulo table. Files with duplicate data are only stored once.
-The example has the following classes:
-
- * CharacterHistogram - A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. An example use of the ChunkInputFormat.
- * ChunkCombiner - An Iterator that dedupes file data and sets their visibilities to a combined visibility based on current references to the file data.
- * ChunkInputFormat - An Accumulo InputFormat that provides keys containing file info (List<Entry<Key,Value>>) and values with an InputStream over the file (ChunkInputStream).
- * ChunkInputStream - An input stream over file data stored in Accumulo.
- * FileDataIngest - Takes a list of files and archives them into Accumulo keyed on hashes of the files.
- * FileDataQuery - Retrieves file data based on the hash of the file. (Used by the dirlist.Viewer.)
- * KeyUtil - A utility for creating and parsing null-byte separated strings into/from Text objects.
- * VisibilityCombiner - A utility for merging visibilities into the form (VIS1)|(VIS2)|...
-
-This example is coupled with the dirlist example. See README.dirlist for instructions.
-
-If you haven't already run the README.dirlist example, ingest a file with FileDataIngest.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.filedata.FileDataIngest -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --chunk 1000 $ACCUMULO_HOME/README
-
-Open the accumulo shell and look at the data. The row is the MD5 hash of the file, which you can verify by running a command such as 'md5sum' on the file.
-
-    > scan -t dataTable
-
-Run the CharacterHistogram MapReduce to add some information about the file.
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.filedata.CharacterHistogram -i instance -z zookeepers -u username -p password -t dataTable --auths exampleVis --vis exampleVis
-
-Scan again to see the histogram stored in the 'info' column family.
-
-    > scan -t dataTable
diff --git a/1.9/examples/index.html b/1.9/examples/filter.html
similarity index 53%
copy from 1.9/examples/index.html
copy to 1.9/examples/filter.html
index 3af64b6..4eef7fd 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/filter.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Filter Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,104 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Filter Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>This is a simple filter example. It uses the AgeOffFilter that is provided as
+part of the core package org.apache.accumulo.core.iterators.user. Filters are
+iterators that select desired key/value pairs (or weed out undesired ones).
+Filters extend the org.apache.accumulo.core.iterators.Filter class
+and must implement a method accept(Key k, Value v). This method returns true
+if the key/value pair is to be delivered and false if it is to be ignored.
+Filter takes a “negate” parameter which defaults to false. If set to true, the
+return value of the accept method is negated, so that key/value pairs accepted
+by the method are omitted by the Filter.</p>
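+
+<p>As a concrete illustration, a custom Filter needs little more than the
+accept method described above. The class below is a hypothetical sketch (it is
+not part of Accumulo) that keeps only entries whose values are non-empty:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
+
+public class NonEmptyValueFilter extends Filter {
+  @Override
+  public boolean accept(Key k, Value v) {
+    // true delivers the key/value pair; false ignores it.
+    // With the "negate" option set to true, this decision is reversed.
+    return v.getSize() != 0;
+  }
+}
+</code></pre></div></div>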
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance&gt; createtable filtertest
+username@instance filtertest&gt; setiter -t filtertest -scan -p 10 -n myfilter -ageoff
+AgeOffFilter removes entries with timestamps more than &lt;ttl&gt; milliseconds old
+<span class="gd">----------&gt; set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
+----------&gt; set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
+----------&gt; set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
+</span>username@instance filtertest&gt; scan
+username@instance filtertest&gt; insert foo a b c
+username@instance filtertest&gt; scan
+foo a:b []    c
+username@instance filtertest&gt;
+</code></pre></div></div>
+
+<p>… wait 30 seconds …</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance filtertest&gt; scan
+username@instance filtertest&gt;
+</code></pre></div></div>
+
+<p>Note the absence of the entry inserted more than 30 seconds ago. Since the
+scope was set to “scan”, this means the entry is still in Accumulo, but is
+being filtered out at query time. To delete entries from Accumulo based on
+the ages of their timestamps, AgeOffFilters should be set up for the “minc”
+and “majc” scopes, as well.</p>
+
+<p>To force an ageoff of the persisted data, after setting up the ageoff iterator
+on the “minc” and “majc” scopes you can flush and compact your table. This will
+happen automatically as a background operation on any table that is being
+actively written to, but can also be requested in the shell.</p>
+
+<p>The first setiter command used the special -ageoff flag to specify the
+AgeOffFilter, but any Filter can be configured by using the -class flag. The
+following commands show how to enable the AgeOffFilter for the minc and majc
+scopes using the -class flag, then flush and compact the table.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance filtertest&gt; setiter -t filtertest -minc -majc -p 10 -n myfilter -class org.apache.accumulo.core.iterators.user.AgeOffFilter
+AgeOffFilter removes entries with timestamps more than &lt;ttl&gt; milliseconds old
+<span class="gd">----------&gt; set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
+----------&gt; set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
+----------&gt; set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
+</span>username@instance filtertest&gt; flush
+06 10:42:24,806 [shell.Shell] INFO : Flush of table filtertest initiated...
+username@instance filtertest&gt; compact
+06 10:42:36,781 [shell.Shell] INFO : Compaction of table filtertest started for given range
+username@instance filtertest&gt; flush -t filtertest -w
+06 10:42:52,881 [shell.Shell] INFO : Flush of table filtertest completed.
+username@instance filtertest&gt; compact -t filtertest -w
+06 10:43:00,632 [shell.Shell] INFO : Compacting table ...
+06 10:43:01,307 [shell.Shell] INFO : Compaction of table filtertest completed for given range
+username@instance filtertest&gt;
+</code></pre></div></div>
+
+<p>By default, flush and compact execute in the background, but with the -w flag
+they will wait to return until the operation has completed. Both are
+demonstrated above, though only one call to each would be necessary. A
+specific table can be specified with -t.</p>
+
+<p>After the compaction runs, the newly created files will not contain any data
+that should have been aged off, and the Accumulo garbage collector will remove
+the old files.</p>
+
+<p>To see the iterator settings for a table, use config.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance filtertest&gt; config -t filtertest -f iterator
+---------+---------------------------------------------+---------------------------------------------------------------------------
+SCOPE    | NAME                                        | VALUE
+---------+---------------------------------------------+---------------------------------------------------------------------------
+table    | table.iterator.majc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
+table    | table.iterator.majc.myfilter.opt.ttl ...... | 30000
+table    | table.iterator.majc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
+table    | table.iterator.majc.vers.opt.maxVersions .. | 1
+table    | table.iterator.minc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
+table    | table.iterator.minc.myfilter.opt.ttl ...... | 30000
+table    | table.iterator.minc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
+table    | table.iterator.minc.vers.opt.maxVersions .. | 1
+table    | table.iterator.scan.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
+table    | table.iterator.scan.myfilter.opt.ttl ...... | 30000
+table    | table.iterator.scan.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
+table    | table.iterator.scan.vers.opt.maxVersions .. | 1
+---------+---------------------------------------------+---------------------------------------------------------------------------
+username@instance filtertest&gt;
+</code></pre></div></div>
+
+<p>When setting new iterators, make sure to order their priority numbers
+(specified with -p) in the order you would like the iterators to be applied.
+Also, each iterator must have a unique name and priority within each scope.</p>
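+
+<p>The same configuration can be done from the Java client API instead of the
+shell. The sketch below assumes an existing Connector named conn; without an
+explicit scope set, attachIterator applies the setting to all three scopes.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.iterators.user.AgeOffFilter;
+
+public class AttachAgeOff {
+  static void attach(Connector conn) throws Exception {
+    // Priority 10 and name "myfilter", matching the setiter commands above.
+    IteratorSetting setting = new IteratorSetting(10, "myfilter", AgeOffFilter.class);
+    AgeOffFilter.setTTL(setting, 30000L); // age off entries older than 30 seconds
+    conn.tableOperations().attachIterator("filtertest", setting);
+  }
+}
+</code></pre></div></div>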
 
         </div>
 
diff --git a/1.9/examples/filter.md b/1.9/examples/filter.md
deleted file mode 100644
index e00ba4a..0000000
--- a/1.9/examples/filter.md
+++ /dev/null
@@ -1,110 +0,0 @@
-Title: Apache Accumulo Filter Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This is a simple filter example. It uses the AgeOffFilter that is provided as
-part of the core package org.apache.accumulo.core.iterators.user. Filters are
-iterators that select desired key/value pairs (or weed out undesired ones).
-Filters extend the org.apache.accumulo.core.iterators.Filter class
-and must implement a method accept(Key k, Value v). This method returns true
-if the key/value pair is to be delivered and false if it is to be ignored.
-Filter takes a "negate" parameter which defaults to false. If set to true, the
-return value of the accept method is negated, so that key/value pairs accepted
-by the method are omitted by the Filter.
-
-    username@instance> createtable filtertest
-    username@instance filtertest> setiter -t filtertest -scan -p 10 -n myfilter -ageoff
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> scan
-    username@instance filtertest> insert foo a b c
-    username@instance filtertest> scan
-    foo a:b []    c
-    username@instance filtertest>
-
-... wait 30 seconds ...
-
-    username@instance filtertest> scan
-    username@instance filtertest>
-
-Note the absence of the entry inserted more than 30 seconds ago. Since the
-scope was set to "scan", this means the entry is still in Accumulo, but is
-being filtered out at query time. To delete entries from Accumulo based on
-the ages of their timestamps, AgeOffFilters should be set up for the "minc"
-and "majc" scopes, as well.
-
-To force an ageoff of the persisted data, after setting up the ageoff iterator
-on the "minc" and "majc" scopes you can flush and compact your table. This will
-happen automatically as a background operation on any table that is being
-actively written to, but can also be requested in the shell.
-
-The first setiter command used the special -ageoff flag to specify the
-AgeOffFilter, but any Filter can be configured by using the -class flag. The
-following commands show how to enable the AgeOffFilter for the minc and majc
-scopes using the -class flag, then flush and compact the table.
-
-    username@instance filtertest> setiter -t filtertest -minc -majc -p 10 -n myfilter -class org.apache.accumulo.core.iterators.user.AgeOffFilter
-    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-    ----------> set AgeOffFilter parameter negate, default false keeps k/v that pass accept method, true rejects k/v that pass accept method:
-    ----------> set AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-    ----------> set AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day:
-    username@instance filtertest> flush
-    06 10:42:24,806 [shell.Shell] INFO : Flush of table filtertest initiated...
-    username@instance filtertest> compact
-    06 10:42:36,781 [shell.Shell] INFO : Compaction of table filtertest started for given range
-    username@instance filtertest> flush -t filtertest -w
-    06 10:42:52,881 [shell.Shell] INFO : Flush of table filtertest completed.
-    username@instance filtertest> compact -t filtertest -w
-    06 10:43:00,632 [shell.Shell] INFO : Compacting table ...
-    06 10:43:01,307 [shell.Shell] INFO : Compaction of table filtertest completed for given range
-    username@instance filtertest>
-
-By default, flush and compact execute in the background, but with the -w flag
-they will wait to return until the operation has completed. Both are
-demonstrated above, though only one call to each would be necessary. A
-specific table can be specified with -t.
-
-After the compaction runs, the newly created files will not contain any data
-that should have been aged off, and the Accumulo garbage collector will remove
-the old files.
-
-To see the iterator settings for a table, use config.
-
-    username@instance filtertest> config -t filtertest -f iterator
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    SCOPE    | NAME                                        | VALUE
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    table    | table.iterator.majc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.majc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.majc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.majc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.minc.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.minc.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.minc.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.minc.vers.opt.maxVersions .. | 1
-    table    | table.iterator.scan.myfilter .............. | 10,org.apache.accumulo.core.iterators.user.AgeOffFilter
-    table    | table.iterator.scan.myfilter.opt.ttl ...... | 30000
-    table    | table.iterator.scan.vers .................. | 20,org.apache.accumulo.core.iterators.user.VersioningIterator
-    table    | table.iterator.scan.vers.opt.maxVersions .. | 1
-    ---------+---------------------------------------------+---------------------------------------------------------------------------
-    username@instance filtertest>
-
-When setting new iterators, make sure to order their priority numbers
-(specified with -p) in the order you would like the iterators to be applied.
-Also, each iterator must have a unique name and priority within each scope.
diff --git a/1.8/examples/index.html b/1.9/examples/helloworld.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/helloworld.html
index 3af64b6..3d6caef 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/helloworld.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Hello World Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,45 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Hello World Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
+          <p>This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.helloworld in the examples-simple module:</p>
 
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
+<ul>
+  <li>InsertWithBatchWriter.java - Inserts 10K rows (50K entries) into accumulo with each row having 5 entries</li>
+  <li>ReadData.java - Reads all data between two rows</li>
+</ul>
 
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
+<p>Log into the accumulo shell:</p>
 
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+</code></pre></div></div>
 
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
+<p>Create a table called ‘hellotable’:</p>
 
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance&gt; createtable hellotable
+</code></pre></div></div>
 
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
+<p>Launch a Java program that inserts data with a BatchWriter:</p>
 
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter -i instance -z zookeepers -u username -p password -t hellotable
+</code></pre></div></div>
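+
+<p>The core of InsertWithBatchWriter is roughly the following (a hedged sketch,
+not the exact source; the instance, zookeepers, username, and password values
+are the same placeholders used throughout these examples):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+public class HelloWriterSketch {
+  public static void main(String[] args) throws Exception {
+    Connector conn = new ZooKeeperInstance("instance", "zookeepers")
+        .getConnector("username", new PasswordToken("password"));
+    BatchWriter bw = conn.createBatchWriter("hellotable", new BatchWriterConfig());
+    for (int i = 0; i &lt; 10000; i++) {      // 10K rows
+      Mutation m = new Mutation(new Text(String.format("row_%d", i)));
+      for (int j = 0; j &lt; 5; j++)          // 5 entries per row = 50K entries
+        m.put(new Text("colf"), new Text("colq" + j), new Value(("value" + j).getBytes()));
+      bw.addMutation(m);
+    }
+    bw.close(); // flushes any buffered mutations
+  }
+}
+</code></pre></div></div>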
 
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
+<p>On the accumulo status page at the URL below (where ‘master’ is replaced with the name or IP of your accumulo master), you should see 50K entries.</p>
 
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>http://master:9995/
+</code></pre></div></div>
 
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
+<p>To view the entries, use the shell to scan the table:</p>
 
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance&gt; table hellotable
+username@instance hellotable&gt; scan
+</code></pre></div></div>
 
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+<p>You can also use a Java class to scan the table:</p>
 
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData -i instance -z zookeepers -u username -p password -t hellotable --startKey row_0 --endKey row_1001
+</code></pre></div></div>
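+
+<p>At its core, ReadData scans the range between the two rows with a Scanner
+(a hedged sketch, not the exact source; it assumes an existing Connector named
+conn):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Map.Entry;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+
+public class ReadDataSketch {
+  static void readRange(Connector conn) throws Exception {
+    Scanner scanner = conn.createScanner("hellotable", Authorizations.EMPTY);
+    scanner.setRange(new Range("row_0", "row_1001")); // start and end rows
+    for (Entry&lt;Key,Value&gt; entry : scanner)
+      System.out.println(entry.getKey() + " -&gt; " + entry.getValue());
+  }
+}
+</code></pre></div></div>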
 
         </div>
 
diff --git a/1.9/examples/helloworld.md b/1.9/examples/helloworld.md
deleted file mode 100644
index 618e301..0000000
--- a/1.9/examples/helloworld.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Title: Apache Accumulo Hello World Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This tutorial uses the following Java classes, which can be found in org.apache.accumulo.examples.simple.helloworld in the examples-simple module:
-
- * InsertWithBatchWriter.java - Inserts 10K rows (50K entries) into accumulo with each row having 5 entries
- * ReadData.java - Reads all data between two rows
-
-Log into the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-
-Create a table called 'hellotable':
-
-    username@instance> createtable hellotable
-
-Launch a Java program that inserts data with a BatchWriter:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter -i instance -z zookeepers -u username -p password -t hellotable
-
-On the accumulo status page at the URL below (where 'master' is replaced with the name or IP of your accumulo master), you should see 50K entries
-
-    http://master:9995/
-
-To view the entries, use the shell to scan the table:
-
-    username@instance> table hellotable
-    username@instance hellotable> scan
-
-You can also use a Java class to scan the table:
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.helloworld.ReadData -i instance -z zookeepers -u username -p password -t hellotable --startKey row_0 --endKey row_1001
diff --git a/1.9/examples/index.html b/1.9/examples/index.html
index 3af64b6..22a4d58 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/index.html
@@ -204,6 +204,10 @@ features of Apache Accumulo.</p>
 <p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
                        expressions.</p>
 
+<p><a href="reservations">reservations</a>: Running a reservation system with Conditional Mutations.</p>
+
+<p><a href="rgbalancer">rgbalancer</a>: Spreading out groups of tablets with a Regex Group Balancer.</p>
+
 <p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
                        column in the same table.</p>
 
diff --git a/1.8/examples/index.html b/1.9/examples/isolation.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/isolation.html
index 3af64b6..586a33b 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/isolation.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Isolation Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,39 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Isolation Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>Accumulo has an isolated scanner that ensures partial changes to rows are not
+seen. Isolation is documented in ../docs/isolation.html and the user manual.</p>
+
+<p>InterferenceTest is a simple example that shows the effects of scanning with
+and without isolation. This program starts two threads. One threads
+continually upates all of the values in a row to be the same thing, but
+different from what it used to be. The other thread continually scans the
+table and checks that all values in a row are the same. Without isolation the
+scanning thread will sometimes see different values, which is the result of
+reading the row at the same time a mutation is changing the row.</p>
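+
+<p>On the client side, enabling isolation is just a matter of wrapping a
+regular Scanner (a minimal sketch, assuming an existing Connector named
+conn):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.security.Authorizations;
+
+public class IsolationSketch {
+  static Scanner isolated(Connector conn) throws Exception {
+    // IsolatedScanner buffers each row so readers never see a partial row.
+    return new IsolatedScanner(conn.createScanner("isotest", Authorizations.EMPTY));
+  }
+}
+</code></pre></div></div>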
+
+<p>Below, InterferenceTest is run without isolation enabled for 5000 iterations
+and it reports problems.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000
+ERROR Columns in row 053 had multiple values [53, 4553]
+ERROR Columns in row 061 had multiple values [561, 61]
+ERROR Columns in row 070 had multiple values [570, 1070]
+ERROR Columns in row 079 had multiple values [1079, 1579]
+ERROR Columns in row 088 had multiple values [2588, 1588]
+ERROR Columns in row 106 had multiple values [2606, 3106]
+ERROR Columns in row 115 had multiple values [4615, 3115]
+finished
+</code></pre></div></div>
+
+<p>Below, InterferenceTest is run with isolation enabled for 5000 iterations and
+it reports no problems.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000 --isolated
+finished
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/isolation.md b/1.9/examples/isolation.md
deleted file mode 100644
index 4739f59..0000000
--- a/1.9/examples/isolation.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Title: Apache Accumulo Isolation Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-Accumulo has an isolated scanner that ensures partial changes to rows are not
-seen. Isolation is documented in ../docs/isolation.html and the user manual.
-
-InterferenceTest is a simple example that shows the effects of scanning with
-and without isolation. This program starts two threads. One thread
-continually updates all of the values in a row to be the same thing, but
-different from what it used to be. The other thread continually scans the
-table and checks that all values in a row are the same. Without isolation the
-scanning thread will sometimes see different values, which is the result of
-reading the row at the same time a mutation is changing the row.
-
-Below, Interference Test is run without isolation enabled for 5000 iterations
-and it reports problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000
-    ERROR Columns in row 053 had multiple values [53, 4553]
-    ERROR Columns in row 061 had multiple values [561, 61]
-    ERROR Columns in row 070 had multiple values [570, 1070]
-    ERROR Columns in row 079 had multiple values [1079, 1579]
-    ERROR Columns in row 088 had multiple values [2588, 1588]
-    ERROR Columns in row 106 had multiple values [2606, 3106]
-    ERROR Columns in row 115 had multiple values [4615, 3115]
-    finished
-
-Below, Interference Test is run with isolation enabled for 5000 iterations and
-it reports no problems.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.isolation.InterferenceTest -i instance -z zookeepers -u username -p password -t isotest --iterations 5000 --isolated
-    finished
-
-
diff --git a/1.9/examples/index.html b/1.9/examples/mapred.html
similarity index 50%
copy from 1.9/examples/index.html
copy to 1.9/examples/mapred.html
index 3af64b6..097cdef 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/mapred.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo MapReduce Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,144 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo MapReduce Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This example uses MapReduce and Accumulo to compute word counts for a set of
+documents. This is accomplished using a map-only MapReduce job and an
+Accumulo table with combiners.</p>
+
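+<p>As a rough sketch of the map-only approach (illustrative only, not the
+example’s actual source; class and column names here are assumptions), the
+mapper emits one mutation per word with a count of 1, and the table’s
+SummingCombiner adds the counts together at scan and compaction time:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.io.IOException;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Mapper;
+
+// Emits (word, +1) mutations; AccumuloOutputFormat writes them to the
+// wordCount table, where the SummingCombiner totals the counts.
+public class WordCountMapper extends Mapper&lt;LongWritable,Text,Text,Mutation&gt; {
+  @Override
+  public void map(LongWritable key, Text value, Context output) throws IOException, InterruptedException {
+    for (String word : value.toString().split("\\s+")) {
+      if (word.isEmpty())
+        continue;
+      Mutation m = new Mutation(new Text(word));
+      m.put(new Text("count"), new Text("20080906"), new Value("1".getBytes()));
+      output.write(null, m); // null table name means use the configured default table
+    }
+  }
+}
+</code></pre></div></div>
+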
+<p>To run this example you will need a directory in HDFS containing text files.
+The Accumulo README is used below as the sample input.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -copyFromLocal $ACCUMULO_HOME/README /user/username/wc/Accumulo.README
+$ hadoop fs -ls /user/username/wc
+Found 1 items
+-rw-r--r--   2 username supergroup       9359 2009-07-15 17:54 /user/username/wc/Accumulo.README
+</code></pre></div></div>
+
+<p>The first step in running this example is to create a table with a combiner
+on the “count” column family.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+<span class="gd">- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+</span>username@instance&gt; createtable wordCount
+username@instance wordCount&gt; setiter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -t wordCount -majc -minc -scan
+SummingCombiner interprets Values as Longs and adds them together. A variety of encodings (variable length, fixed length, or string) are available
+<span class="gd">----------&gt; set SummingCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.: false
+----------&gt; set SummingCombiner parameter columns, &lt;col fam&gt;[:&lt;col qual&gt;]{,&lt;col fam&gt;[:&lt;col qual&gt;]} escape non-alphanum chars using %&lt;hex&gt;.: count
+----------&gt; set SummingCombiner parameter lossy, if true, failed decodes are ignored. Otherwise combiner will error on failed decodes (default false): &lt;TRUE|FALSE&gt;: false
+----------&gt; set SummingCombiner parameter type, &lt;VARLEN|FIXEDLEN|STRING|fullClassName&gt;: STRING
+</span>username@instance wordCount&gt; quit
+</code></pre></div></div>
+
+<p>After creating the table, run the word count MapReduce job.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
+
+11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
+11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
+11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
+11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
+11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
+11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
+11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters
+11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
+11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
+11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
+11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
+11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
+11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
+11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
+11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
+</code></pre></div></div>
+
+<p>After the MapReduce job completes, query the Accumulo table to see the word
+counts.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+username@instance&gt; table wordCount
+username@instance wordCount&gt; scan -b the
+the count:20080906 []    75
+their count:20080906 []    2
+them count:20080906 []    1
+then count:20080906 []    1
+there count:20080906 []    1
+these count:20080906 []    3
+this count:20080906 []    6
+through count:20080906 []    1
+time count:20080906 []    3
+time. count:20080906 []    1
+to count:20080906 []    27
+total count:20080906 []    1
+tserver, count:20080906 []    1
+tserver.compaction.major.concurrent.max count:20080906 []    1
+...
+</code></pre></div></div>
+
+<p>Another example to look at is
+org.apache.accumulo.examples.simple.mapreduce.UniqueColumns. This example
+computes the unique set of columns in a table and shows how a MapReduce job
+can directly read a table’s files from HDFS.</p>
+
+<p>One more example available is
+org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount.
+The TokenFileWordCount example works exactly like the WordCount example
+explained above, except that it uses a token file rather than giving the
+password directly to the MapReduce job (this avoids exposing the password
+in the job’s world-readable configuration).</p>
+
+<p>To create a token file, use the create-token utility:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo create-token
+</code></pre></div></div>
+
+<p>It defaults to creating a PasswordToken, but you can specify the token class
+with -tc (requires the fully qualified class name). Based on the token class,
+it will prompt you for each property required to create the token.</p>
+
+<p>The last value it prompts for is a local filename to save to. If this file
+exists, it will append the new token to the end. Multiple tokens can exist in
+a file, but only the first one for each user will be recognized.</p>
+
+<p>Rather than waiting for the prompts, you can specify some options when calling
+create-token, for example:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo create-token -u root -p secret -f root.pw
+</code></pre></div></div>
+
+<p>This would create a token file containing a PasswordToken for
+user ‘root’ with password ‘secret’, saved to ‘root.pw’.</p>
+
+<p>This local file needs to be uploaded to HDFS to be used with the
+MapReduce job. For example, if the file were ‘root.pw’ in the local directory:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -put root.pw root.pw
+</code></pre></div></div>
+
+<p>This would put ‘root.pw’ in the user’s home directory in HDFS.</p>
+
+<p>Because the basic WordCount example uses Opts (which extends
+ClientOnRequiredTable) to parse its arguments, you can use a token file with
+it by running the same command as above, replacing the password option with
+the token file option (use -tf rather than -p):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers --input /user/username/wc -t wordCount -u username -tf tokenfile
+</code></pre></div></div>
+
+<p>In the above examples, the username was ‘root’ and the token file was ‘root.pw’.</p>
+
+<p>However, if you don’t want to use the Opts class to parse arguments,
+TokenFileWordCount is an example of using the token file manually:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
+</code></pre></div></div>
+
+<p>The results should be the same as the WordCount example except that the
+authentication token was not stored in the configuration. It was instead
+stored in a file that the MapReduce job pulled into the distributed cache.
+(If you ran either of these on the same table right after the
+WordCount example, then the resulting counts should simply double.)</p>
 
 
         </div>
diff --git a/1.9/examples/mapred.md b/1.9/examples/mapred.md
deleted file mode 100644
index 9e9b17f..0000000
--- a/1.9/examples/mapred.md
+++ /dev/null
@@ -1,154 +0,0 @@
-Title: Apache Accumulo MapReduce Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses mapreduce and accumulo to compute word counts for a set of
-documents. This is accomplished using a map-only mapreduce job and a
-accumulo table with combiners.
-
-To run this example you will need a directory in HDFS containing text files.
-The accumulo readme will be used to show how to run this example.
-
-    $ hadoop fs -copyFromLocal $ACCUMULO_HOME/README /user/username/wc/Accumulo.README
-    $ hadoop fs -ls /user/username/wc
-    Found 1 items
-    -rw-r--r--   2 username supergroup       9359 2009-07-15 17:54 /user/username/wc/Accumulo.README
-
-The first part of running this example is to create a table with a combiner
-for the column family count.
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable wordCount
-    username@instance wordCount> setiter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -t wordCount -majc -minc -scan
-    SummingCombiner interprets Values as Longs and adds them together. A variety of encodings (variable length, fixed length, or string) are available
-    ----------> set SummingCombiner parameter all, set to true to apply Combiner to every column, otherwise leave blank. if true, columns option will be ignored.: false
-    ----------> set SummingCombiner parameter columns, <col fam>[:<col qual>]{,<col fam>[:<col qual>]} escape non-alphanum chars using %<hex>.: count
-    ----------> set SummingCombiner parameter lossy, if true, failed decodes are ignored. Otherwise combiner will error on failed decodes (default false): <TRUE|FALSE>: false
-    ----------> set SummingCombiner parameter type, <VARLEN|FIXEDLEN|STRING|fullClassName>: STRING
-    username@instance wordCount> quit
-
-After creating the table, run the word count map reduce job.
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -p password
-
-    11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
-    11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
-    11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
-    11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
-    11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
-    11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
-    11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters
-    11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
-    11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
-    11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
-    11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
-    11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
-    11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
-
-After the map reduce job completes, query the accumulo table to see word
-counts.
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> table wordCount
-    username@instance wordCount> scan -b the
-    the count:20080906 []    75
-    their count:20080906 []    2
-    them count:20080906 []    1
-    then count:20080906 []    1
-    there count:20080906 []    1
-    these count:20080906 []    3
-    this count:20080906 []    6
-    through count:20080906 []    1
-    time count:20080906 []    3
-    time. count:20080906 []    1
-    to count:20080906 []    27
-    total count:20080906 []    1
-    tserver, count:20080906 []    1
-    tserver.compaction.major.concurrent.max count:20080906 []    1
-    ...
-
-Another example to look at is
-org.apache.accumulo.examples.simple.mapreduce.UniqueColumns. This example
-computes the unique set of columns in a table and shows how a map reduce job
-can directly read a tables files from HDFS.
-
-One more example available is
-org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount.
-The TokenFileWordCount example works exactly the same as the WordCount example
-explained above except that it uses a token file rather than giving the
-password directly to the map-reduce job (this avoids having the password
-displayed in the job's configuration which is world-readable).
-
-To create a token file, use the create-token utility
-
-  $ ./bin/accumulo create-token
-
-It defaults to creating a PasswordToken, but you can specify the token class
-with -tc (requires the fully qualified class name). Based on the token class,
-it will prompt you for each property required to create the token.
-
-The last value it prompts for is a local filename to save to. If this file
-exists, it will append the new token to the end. Multiple tokens can exist in
-a file, but only the first one for each user will be recognized.
-
-Rather than waiting for the prompts, you can specify some options when calling
-create-token, for example
-
-  $ ./bin/accumulo create-token -u root -p secret -f root.pw
-
-would create a token file containing a PasswordToken for
-user 'root' with password 'secret' and saved to 'root.pw'
-
-This local file needs to be uploaded to hdfs to be used with the
-map-reduce job. For example, if the file were 'root.pw' in the local directory:
-
-  $ hadoop fs -put root.pw root.pw
-
-This would put 'root.pw' in the user's home directory in hdfs.
-
-Because the basic WordCount example uses Opts to parse its arguments
-(which extends ClientOnRequiredTable), you can use a token file with
-the basic WordCount example by calling the same command as explained above
-except replacing the password with the token file (rather than -p, use -tf).
-
-  $ ./bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.WordCount -i instance -z zookeepers  --input /user/username/wc -t wordCount -u username -tf tokenfile
-
-In the above examples, username was 'root' and tokenfile was 'root.pw'
-
-However, if you don't want to use the Opts class to parse arguments,
-the TokenFileWordCount is an example of using the token file manually.
-
-  $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TokenFileWordCount instance zookeepers username tokenfile /user/username/wc wordCount
-
-The results should be the same as the WordCount example except that the
-authentication token was not stored in the configuration. It was instead
-stored in a file that the map-reduce job pulled into the distributed cache.
-(If you ran either of these on the same table right after the
-WordCount example, then the resulting counts should just double.)
-
-
-
-
diff --git a/1.8/examples/index.html b/1.9/examples/maxmutation.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/maxmutation.html
index 3af64b6..781740e 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/maxmutation.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo MaxMutation Constraints Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,39 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo MaxMutation Constraints Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This is an example of how to limit the size of mutations that will be accepted
+into a table. Under the default configuration, Accumulo does not limit the
+size of mutations that can be ingested. Poorly behaved writers might
+inadvertently create mutations so large that they cause the tablet servers to
+run out of memory. A simple constraint can be added to a table to reject very
+large mutations.</p>
+
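+<p>For reference, such a constraint takes only a few lines of Java. The sketch
+below is illustrative, not the shipped MaxMutationSize source, though it
+mirrors the 1/256th-of-memory limit noted below:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Collections;
+import java.util.List;
+import org.apache.accumulo.core.constraints.Constraint;
+import org.apache.accumulo.core.data.Mutation;
+
+public class RejectLargeMutation implements Constraint {
+  // Reject mutations larger than 1/256th of the JVM's max memory.
+  private static final long MAX_SIZE = Runtime.getRuntime().maxMemory() &gt;&gt; 8;
+  private static final short VIOLATION_CODE = 0;
+
+  @Override
+  public String getViolationDescription(short violationCode) {
+    return "mutation exceeded maximum size of " + MAX_SIZE;
+  }
+
+  @Override
+  public List&lt;Short&gt; check(Environment env, Mutation mutation) {
+    if (mutation.estimatedMemoryUsed() &lt; MAX_SIZE)
+      return null; // null means no violations
+    return Collections.singletonList(VIOLATION_CODE);
+  }
+}
+</code></pre></div></div>
+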
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+
+Shell - Apache Accumulo Interactive Shell
+-
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; createtable test_ingest
+username@instance test_ingest&gt; config -t test_ingest -s table.constraint.1=org.apache.accumulo.examples.simple.constraints.MaxMutationSize
+username@instance test_ingest&gt;
+</code></pre></div></div>
+
+<p>Now the table will reject any mutation that is larger than 1/256th of the
+working memory of the tablet server. The following command attempts to ingest
+a single row with 10000 columns, which exceeds the memory limit. Depending on the
+amount of Java heap your tserver(s) are given, you may have to increase the number
+of columns provided to see the failure.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.test.TestIngest -i instance -z zookeepers -u username -p password --rows 1 --cols 10000 
+ERROR : Constraint violates : ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.MaxMutationSize, violationCode:0, violationDescription:mutation exceeded maximum size of 188160, numberOfViolatingMutations:1)
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/maxmutation.md b/1.9/examples/maxmutation.md
deleted file mode 100644
index 45b80d4..0000000
--- a/1.9/examples/maxmutation.md
+++ /dev/null
@@ -1,49 +0,0 @@
-Title: Apache Accumulo MaxMutation Constraints Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This an example of how to limit the size of mutations that will be accepted into
-a table. Under the default configuration, accumulo does not provide a limitation
-on the size of mutations that can be ingested. Poorly behaved writers might
-inadvertently create mutations so large, that they cause the tablet servers to
-run out of memory. A simple contraint can be added to a table to reject very
-large mutations.
-
-    $ ./bin/accumulo shell -u username -p password
-
-    Shell - Apache Accumulo Interactive Shell
-    -
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable test_ingest
-    username@instance test_ingest> config -t test_ingest -s table.constraint.1=org.apache.accumulo.examples.simple.constraints.MaxMutationSize
-    username@instance test_ingest>
-
-
-Now the table will reject any mutation that is larger than 1/256th of the 
-working memory of the tablet server.  The following command attempts to ingest 
-a single row with 10000 columns, which exceeds the memory limit. Depending on the
-amount of Java heap your tserver(s) are given, you may have to increase the number
-of columns provided to see the failure.
-
-    $ ./bin/accumulo org.apache.accumulo.test.TestIngest -i instance -z zookeepers -u username -p password --rows 1 --cols 10000 
-    ERROR : Constraint violates : ConstraintViolationSummary(constrainClass:org.apache.accumulo.examples.simple.constraints.MaxMutationSize, violationCode:0, violationDescription:mutation exceeded maximum size of 188160, numberOfViolatingMutations:1)
-
diff --git a/1.8/examples/index.html b/1.9/examples/regex.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/regex.html
index 3af64b6..2b318df 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/regex.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Regex Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,48 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Regex Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This example uses MapReduce and Accumulo to find items using regular expressions.
+This is accomplished using a map-only MapReduce job and a scan-time iterator.</p>
+
+<p>To run this example you will need some data in a table. The following will
+put a trivial amount of data into Accumulo using the Accumulo shell:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; createtable input
+username@instance&gt; insert dogrow dogcf dogcq dogvalue
+username@instance&gt; insert catrow catcf catcq catvalue
+username@instance&gt; quit
+</code></pre></div></div>
+
+<p>The RegexExample class sets an iterator on the scanner. This does pattern matching
+against each key/value in Accumulo, and only returns matching items. It does this
+in parallel and stores the results in files in HDFS.</p>
+
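+<p>A minimal client-side sketch of the same idea (assuming a Connector named
+conn; this is not the RegexExample source) configures a scan-time RegExFilter
+so the matching happens on the tablet servers:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Map.Entry;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.user.RegExFilter;
+import org.apache.accumulo.core.security.Authorizations;
+
+public class RowRegexScan {
+  static void scanMatchingRows(Connector conn) throws Exception {
+    Scanner scanner = conn.createScanner("input", Authorizations.EMPTY);
+    IteratorSetting setting = new IteratorSetting(50, "regex", RegExFilter.class);
+    // Match the row against dog.*; the nulls skip family, qualifier, and value.
+    RegExFilter.setRegexs(setting, "dog.*", null, null, null, false);
+    scanner.addIterator(setting);
+    for (Entry&lt;Key,Value&gt; entry : scanner)
+      System.out.println(entry.getKey() + " " + entry.getValue());
+  }
+}
+</code></pre></div></div>
+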
+<p>The following will search for any rows in the input table that start with “dog”:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
+
+$ hadoop fs -ls /tmp/output
+Found 3 items
+-rw-r--r--   1 username supergroup          0 2013-01-10 14:11 /tmp/output/_SUCCESS
+drwxr-xr-x   - username supergroup          0 2013-01-10 14:10 /tmp/output/_logs
+-rw-r--r--   1 username supergroup         51 2013-01-10 14:10 /tmp/output/part-m-00000
+</code></pre></div></div>
+
+<p>We can see the output of our little MapReduce job:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -text /tmp/output/part-m-00000
+dogrow dogcf:dogcq [] 1357844987994 false	dogvalue
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/regex.md b/1.9/examples/regex.md
deleted file mode 100644
index ea9f208..0000000
--- a/1.9/examples/regex.md
+++ /dev/null
@@ -1,57 +0,0 @@
-Title: Apache Accumulo Regex Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses mapreduce and accumulo to find items using regular expressions.
-This is accomplished using a map-only mapreduce job and a scan-time iterator.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dogrow dogcf dogcq dogvalue
-    username@instance> insert catrow catcf catcq catvalue
-    username@instance> quit
-
-The RegexExample class sets an iterator on the scanner. This does pattern matching
-against each key/value in accumulo, and only returns matching items. It will do this
-in parallel and will store the results in files in hdfs.
-
-The following will search for any rows in the input table that starts with "dog":
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RegexExample -u user -p passwd -i instance -t input --rowRegex 'dog.*' --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    Found 3 items
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:11 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:10 /tmp/output/_logs
-    -rw-r--r--   1 username supergroup         51 2013-01-10 14:10 /tmp/output/part-m-00000
-
-We can see the output of our little map-reduce job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    dogrow dogcf:dogcq [] 1357844987994 false	dogvalue
-
-
diff --git a/1.8/examples/index.html b/1.9/examples/reservations.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/reservations.html
index 3af64b6..cb3e868 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/reservations.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Reservation Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,57 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Reservation Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This example shows running a simple reservation system implemented using
+conditional mutations. This system guarantees that only one concurrent user can
+reserve a resource. The example’s reserve command allows multiple users to be
+specified. When this is done, it creates a separate reservation thread for each
+user. In the example below, threads are spun up for alice, bob, eve, mallory,
+and trent to reserve room06 on 20140101. Bob ends up getting the reservation
+and everyone else is put on a wait list. The example code will take any string
+for what, when, and who.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.reservations.ARS
+&gt;connect test16 localhost root secret ars
+  connected
+&gt;
+  Commands :
+    reserve &lt;what&gt; &lt;when&gt; &lt;who&gt; {who}
+    cancel &lt;what&gt; &lt;when&gt; &lt;who&gt;
+    list &lt;what&gt; &lt;when&gt;
+&gt;reserve room06 20140101 alice bob eve mallory trent
+                   bob : RESERVED
+               mallory : WAIT_LISTED
+                 alice : WAIT_LISTED
+                 trent : WAIT_LISTED
+                   eve : WAIT_LISTED
+&gt;list room06 20140101
+  Reservation holder : bob
+  Wait list : [mallory, alice, trent, eve]
+&gt;cancel room06 20140101 alice
+&gt;cancel room06 20140101 bob
+&gt;list room06 20140101
+  Reservation holder : mallory
+  Wait list : [trent, eve]
+&gt;quit
+</code></pre></div></div>
+
+<p>Scanning the table in the Accumulo shell after running the example shows the
+following:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@test16&gt; table ars
+root@test16 ars&gt; scan
+room06:20140101 res:0001 []    mallory
+room06:20140101 res:0003 []    trent
+room06:20140101 res:0004 []    eve
+room06:20140101 tx:seq []    6
+</code></pre></div></div>
+
+<p>The tx:seq column is incremented for each update to the row, allowing for
+detection of concurrent changes. For an update to go through, the sequence
+number must not have changed since the data was read. If it does change,
+the conditional mutation will fail and the example code will retry.</p>
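+
+<p>In client code, that check-then-update step can be written with a
+ConditionalWriter. The sketch below is illustrative (the row and values are
+made up; it is not the ARS source) and assumes a Connector named conn:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriter.Status;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.data.Condition;
+import org.apache.accumulo.core.data.ConditionalMutation;
+
+public class ReserveSketch {
+  static boolean tryReserve(Connector conn) throws Exception {
+    ConditionalWriter writer = conn.createConditionalWriter("ars", new ConditionalWriterConfig());
+    try {
+      ConditionalMutation cm = new ConditionalMutation("room06:20140101");
+      // Only apply if tx:seq still holds the value read earlier (6).
+      cm.addCondition(new Condition("tx", "seq").setValue("6"));
+      cm.put("res", "0005", "alice");   // append to the wait list
+      cm.put("tx", "seq", "7");         // bump the sequence number
+      Status status = writer.write(cm).getStatus();
+      return status == Status.ACCEPTED; // otherwise re-read and retry
+    } finally {
+      writer.close();
+    }
+  }
+}
+</code></pre></div></div>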
 
 
         </div>
diff --git a/1.9/examples/reservations.md b/1.9/examples/reservations.md
deleted file mode 100644
index ff111b4..0000000
--- a/1.9/examples/reservations.md
+++ /dev/null
@@ -1,66 +0,0 @@
-Title: Apache Accumulo Isolation Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows running a simple reservation system implemented using
-conditional mutations. This system guarantees that only one concurrent user can
-reserve a resource. The example's reserve command allows multiple users to be
-specified. When this is done, it creates a separate reservation thread for each
-user. In the example below threads are spun up for alice, bob, eve, mallory,
-and trent to reserve room06 on 20140101. Bob ends up getting the reservation
-and everyone else is put on a wait list. The example code will take any string
-for what, when and who.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.reservations.ARS
-    >connect test16 localhost root secret ars
-      connected
-    >
-      Commands :
-        reserve <what> <when> <who> {who}
-        cancel <what> <when> <who>
-        list <what> <when>
-    >reserve room06 20140101 alice bob eve mallory trent
-                       bob : RESERVED
-                   mallory : WAIT_LISTED
-                     alice : WAIT_LISTED
-                     trent : WAIT_LISTED
-                       eve : WAIT_LISTED
-    >list room06 20140101
-      Reservation holder : bob
-      Wait list : [mallory, alice, trent, eve]
-    >cancel room06 20140101 alice
-    >cancel room06 20140101 bob
-    >list room06 20140101
-      Reservation holder : mallory
-      Wait list : [trent, eve]
-    >quit
-
-Scanning the table in the Accumulo shell after running the example shows the
-following:
-
-    root@test16> table ars
-    root@test16 ars> scan
-    room06:20140101 res:0001 []    mallory
-    room06:20140101 res:0003 []    trent
-    room06:20140101 res:0004 []    eve
-    room06:20140101 tx:seq []    6
-
-The tx:seq column is incremented for each update to the row allowing for
-detection of concurrent changes. For an update to go through, the sequence
-number must not have changed since the data was read. If it does change,
-the conditional mutation will fail and the example code will retry.
-
diff --git a/1.9/examples/index.html b/1.9/examples/rgbalancer.html
similarity index 51%
copy from 1.9/examples/index.html
copy to 1.9/examples/rgbalancer.html
index 3af64b6..d0dba8e 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/rgbalancer.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Regex Group Balancer Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,155 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Regex Group Balancer Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>For some data access patterns, it’s important to spread groups of tablets within
+a table out evenly.  Accumulo has a balancer that can do this using a regular
+expression to group tablets. This example shows how this balancer spreads 4
+groups of tablets within a table evenly across 17 tablet servers.</p>
+
+<p>The commands below create a table and add splits.  For this example we would like
+all of the tablets whose split points share the same leading two digits to be on
+different tservers.  This gives us four groups of tablets: 01, 02, 03, and 04.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@accumulo&gt; createtable testRGB
+root@accumulo testRGB&gt; addsplits -t testRGB 01b 01m 01r 01z  02b 02m 02r 02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 04l 04m 04n 04o 04p
+root@accumulo testRGB&gt; tables -l
+accumulo.metadata    =&gt;        !0
+accumulo.replication =&gt;      +rep
+accumulo.root        =&gt;        +r
+testRGB              =&gt;         2
+trace                =&gt;         1
+</code></pre></div></div>
+
+<p>After adding the splits we look at the locations in the metadata table.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@accumulo testRGB&gt; scan -t accumulo.metadata -b 2; -e 2&lt; -c loc
+2;01b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
+2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
+2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
+2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
+2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
+2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
+2;02r loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
+2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
+2;03b loc:14a5f6e079d000d []    ip-10-1-2-21:9997
+2;03m loc:14a5f6e079d000e []    ip-10-1-2-20:9997
+2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
+2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
+2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
+2;04b loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
+2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
+2;04d loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
+2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
+2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
+2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
+2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
+2;04i loc:34a5f6e086b000d []    ip-10-1-2-19:9997
+2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
+2;04k loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
+2;04l loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
+2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
+2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
+2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
+2;04p loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
+2&lt; loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
+</code></pre></div></div>
+
+<p>Below, the information above has been rearranged to show which tablet groups are on
+each tserver.  The four tablets in group 03 are on two tservers; ideally those
+tablets would be spread across 4 tservers.  Note the default tablet (2&lt;) was
+categorized as group 04 below.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ip-10-1-2-13:9997 01
+ip-10-1-2-14:9997 04
+ip-10-1-2-15:9997 01
+ip-10-1-2-16:9997 04 04
+ip-10-1-2-17:9997 04 04
+ip-10-1-2-18:9997 04
+ip-10-1-2-19:9997 04 04
+ip-10-1-2-20:9997 03 03
+ip-10-1-2-21:9997 03 03
+ip-10-1-2-22:9997 04 04
+ip-10-1-2-23:9997 04 04
+ip-10-1-2-24:9997 04 04
+ip-10-1-2-25:9997 01 01
+ip-10-1-2-26:9997 02 04
+ip-10-1-2-27:9997 02 02
+ip-10-1-2-28:9997 02 04
+ip-10-1-2-29:9997 04
+</code></pre></div></div>
+
+<p>To remedy this situation, the RegexGroupBalancer is configured with the
+commands below.  The configured regular expression selects the first two digits
+from a tablet’s end row as the group id.  Tablets that don’t match and the
+default tablet are configured to be in group 04.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@accumulo testRGB&gt; config -t testRGB -s table.custom.balancer.group.regex.pattern=(\\d\\d).*
+root@accumulo testRGB&gt; config -t testRGB -s table.custom.balancer.group.regex.default=04
+root@accumulo testRGB&gt; config -t testRGB -s table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
+</code></pre></div></div>
+
+<p>After waiting a little bit, look at the tablet locations again and all is good.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@accumulo testRGB&gt; scan -t accumulo.metadata -b 2; -e 2&lt; -c loc
+2;01b loc:34a5f6e086b000a []    ip-10-1-2-18:9997
+2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
+2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
+2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
+2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
+2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
+2;02r loc:34a5f6e086b000d []    ip-10-1-2-19:9997
+2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
+2;03b loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
+2;03m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
+2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
+2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
+2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
+2;04b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
+2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
+2;04d loc:14a5f6e079d000e []    ip-10-1-2-20:9997
+2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
+2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
+2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
+2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
+2;04i loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
+2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
+2;04k loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
+2;04l loc:14a5f6e079d000f []    ip-10-1-2-13:9997
+2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
+2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
+2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
+2;04p loc:14a5f6e079d000d []    ip-10-1-2-21:9997
+2&lt; loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
+</code></pre></div></div>
+
+<p>Once again, the data above is transformed to make it easier to see which groups
+are on tservers.  The transformed data below shows that all groups are now
+evenly spread.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>ip-10-1-2-13:9997 01 04
+ip-10-1-2-14:9997    04
+ip-10-1-2-15:9997 01 04
+ip-10-1-2-16:9997    04
+ip-10-1-2-17:9997    04
+ip-10-1-2-18:9997 01 04
+ip-10-1-2-19:9997 02 04
+ip-10-1-2-20:9997 03 04
+ip-10-1-2-21:9997 03 04
+ip-10-1-2-22:9997    04
+ip-10-1-2-23:9997 03 04
+ip-10-1-2-24:9997    04
+ip-10-1-2-25:9997 01 04
+ip-10-1-2-26:9997 02 04
+ip-10-1-2-27:9997 02 04
+ip-10-1-2-28:9997 02 04
+ip-10-1-2-29:9997 03 04
+</code></pre></div></div>
+
+<p>If you need this functionality but a regular expression does not meet your
+needs, extend GroupBalancer.  This allows you to specify a partitioning
+function in Java.  Use the RegexGroupBalancer source as an example.</p>
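+
+<p>For illustration, below is a minimal sketch of such a balancer.  The class
+name and grouping logic are hypothetical, and the constructor and
+getPartitioner() signatures are assumed from the 1.8/1.9 GroupBalancer source,
+so verify them against your version before use.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import com.google.common.base.Function;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.server.master.balancer.GroupBalancer;
+
+// Hypothetical balancer that groups tablets by the first byte of the end row.
+public class FirstByteGroupBalancer extends GroupBalancer {
+
+  public FirstByteGroupBalancer(String tableId) {
+    super(tableId);
+  }
+
+  @Override
+  protected Function&lt;KeyExtent,String&gt; getPartitioner() {
+    return new Function&lt;KeyExtent,String&gt;() {
+      @Override
+      public String apply(KeyExtent extent) {
+        // Put the default tablet (null end row) in its own group.
+        if (extent.getEndRow() == null || extent.getEndRow().getLength() == 0)
+          return "default";
+        return String.format("%02x", extent.getEndRow().getBytes()[0]);
+      }
+    };
+  }
+}
+</code></pre></div></div>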
 
         </div>
 
diff --git a/1.9/examples/rgbalancer.md b/1.9/examples/rgbalancer.md
deleted file mode 100644
index f192a93..0000000
--- a/1.9/examples/rgbalancer.md
+++ /dev/null
@@ -1,159 +0,0 @@
-Title: Apache Accumulo Hello World Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-For some data access patterns, it's important to spread groups of tablets within
-a table out evenly.  Accumulo has a balancer that can do this using a regular
-expression to group tablets. This example shows how this balancer spreads 4
-groups of tablets within a table evenly across 17 tablet servers.
-
-Below shows creating a table and adding splits.  For this example we would like
-all of the tablets where the split point has the same two digits to be on
-different tservers.  This gives us four groups of tablets: 01, 02, 03, and 04.   
-
-    root@accumulo> createtable testRGB
-    root@accumulo testRGB> addsplits -t testRGB 01b 01m 01r 01z  02b 02m 02r 02z 03b 03m 03r 03z 04a 04b 04c 04d 04e 04f 04g 04h 04i 04j 04k 04l 04m 04n 04o 04p
-    root@accumulo testRGB> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    testRGB              =>         2
-    trace                =>         1
-
-After adding the splits we look at the locations in the metadata table.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03m loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04l loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Below, the information above has been condensed to show which tablet groups are
-on each tserver.  The four tablets in group 03 are on only two tservers;
-ideally they would be spread across four tservers.  Note that the default
-tablet (2<) is categorized as group 04 below.
-
-    ip-10-1-2-13:9997 01
-    ip-10-1-2-14:9997 04
-    ip-10-1-2-15:9997 01
-    ip-10-1-2-16:9997 04 04
-    ip-10-1-2-17:9997 04 04
-    ip-10-1-2-18:9997 04
-    ip-10-1-2-19:9997 04 04
-    ip-10-1-2-20:9997 03 03
-    ip-10-1-2-21:9997 03 03
-    ip-10-1-2-22:9997 04 04
-    ip-10-1-2-23:9997 04 04
-    ip-10-1-2-24:9997 04 04
-    ip-10-1-2-25:9997 01 01
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 02
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 04
-
-To remedy this situation, the RegexGroupBalancer is configured with the
-commands below.  The configured regular expression selects the first two digits
-from a tablet's end row as the group id.  Tablets that don't match and the
-default tablet are configured to be in group 04.
-
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.pattern=(\\d\\d).*
-    root@accumulo testRGB> config -t testRGB -s table.custom.balancer.group.regex.default=04
-    root@accumulo testRGB> config -t testRGB -s table.balancer=org.apache.accumulo.server.master.balancer.RegexGroupBalancer
-
-After waiting a little bit, look at the tablet locations again and all is good.
-
-    root@accumulo testRGB> scan -t accumulo.metadata -b 2; -e 2< -c loc
-    2;01b loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;01m loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;01r loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;01z loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;02b loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;02m loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;02r loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;02z loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;03b loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;03m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;03r loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2;03z loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04a loc:34a5f6e086b000b []    ip-10-1-2-26:9997
-    2;04b loc:34a5f6e086b000c []    ip-10-1-2-25:9997
-    2;04c loc:14a5f6e079d0010 []    ip-10-1-2-17:9997
-    2;04d loc:14a5f6e079d000e []    ip-10-1-2-20:9997
-    2;04e loc:24a5f6e07d3000d []    ip-10-1-2-29:9997
-    2;04f loc:24a5f6e07d3000c []    ip-10-1-2-16:9997
-    2;04g loc:24a5f6e07d3000a []    ip-10-1-2-14:9997
-    2;04h loc:14a5f6e079d000c []    ip-10-1-2-28:9997
-    2;04i loc:14a5f6e079d0011 []    ip-10-1-2-15:9997
-    2;04j loc:34a5f6e086b000d []    ip-10-1-2-19:9997
-    2;04k loc:14a5f6e079d0012 []    ip-10-1-2-27:9997
-    2;04l loc:14a5f6e079d000f []    ip-10-1-2-13:9997
-    2;04m loc:24a5f6e07d30009 []    ip-10-1-2-23:9997
-    2;04n loc:24a5f6e07d3000b []    ip-10-1-2-22:9997
-    2;04o loc:34a5f6e086b000a []    ip-10-1-2-18:9997
-    2;04p loc:14a5f6e079d000d []    ip-10-1-2-21:9997
-    2< loc:24a5f6e07d30008 []    ip-10-1-2-24:9997
-
-Once again, the data above is transformed to make it easier to see which groups
-are on tservers.  The transformed data below shows that all groups are now
-evenly spread.
-
-    ip-10-1-2-13:9997 01 04
-    ip-10-1-2-14:9997    04
-    ip-10-1-2-15:9997 01 04
-    ip-10-1-2-16:9997    04
-    ip-10-1-2-17:9997    04
-    ip-10-1-2-18:9997 01 04
-    ip-10-1-2-19:9997 02 04
-    ip-10-1-2-20:9997 03 04
-    ip-10-1-2-21:9997 03 04
-    ip-10-1-2-22:9997    04
-    ip-10-1-2-23:9997 03 04
-    ip-10-1-2-24:9997    04
-    ip-10-1-2-25:9997 01 04
-    ip-10-1-2-26:9997 02 04
-    ip-10-1-2-27:9997 02 04
-    ip-10-1-2-28:9997 02 04
-    ip-10-1-2-29:9997 03 04
-
-If you need this functionality but a regular expression does not meet your
-needs, extend GroupBalancer.  This allows you to specify a partitioning
-function in Java.  Use the RegexGroupBalancer source as an example.
diff --git a/1.8/examples/index.html b/1.9/examples/rowhash.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/rowhash.html
index 3af64b6..7308284 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/rowhash.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo RowHash Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,51 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo RowHash Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This example shows a simple map/reduce job that reads from an Accumulo table and
+writes back into that table.</p>
+
+<p>To run this example you will need some data in a table. The following will
+put a trivial amount of data into Accumulo using the Accumulo shell:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; createtable input
+username@instance&gt; insert a-row cf cq value
+username@instance&gt; insert b-row cf cq value
+username@instance&gt; quit
+</code></pre></div></div>
+
+<p>The RowHash class will insert a hash for each row in the table if it contains a
+specified column. Here’s how to run the map/reduce job:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
+</code></pre></div></div>
+
+<p>Now we can scan the table and see the hashes:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; scan -t input
+a-row cf:cq []    value
+a-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
+b-row cf:cq []    value
+b-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
+username@instance&gt;
+</code></pre></div></div>
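+
+<p>For reference, the per-row work that the job performs can be approximated with
+the client API.  This is a hedged sketch, not the actual RowHash source: it
+assumes an MD5 digest of the value, base64 encoded, written to the
+&lt;family&gt;-HASHTYPE:&lt;qualifier&gt;-MD5BASE64 column seen in the scan above.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.security.MessageDigest;
+import java.util.Base64;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.hadoop.io.Text;
+
+public class HashMutationSketch {
+  // Build the mutation implied by the scan output above for one (row, cf, cq, value).
+  static Mutation hashMutation(Text row, String cf, String cq, byte[] value) throws Exception {
+    byte[] digest = MessageDigest.getInstance("MD5").digest(value);
+    String encoded = Base64.getEncoder().encodeToString(digest);
+    Mutation m = new Mutation(row);
+    m.put(new Text(cf + "-HASHTYPE"), new Text(cq + "-MD5BASE64"),
+        new Value(encoded.getBytes()));
+    return m;
+  }
+}
+</code></pre></div></div>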
 
 
         </div>
diff --git a/1.9/examples/rowhash.md b/1.9/examples/rowhash.md
deleted file mode 100644
index 43782c9..0000000
--- a/1.9/examples/rowhash.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Title: Apache Accumulo RowHash Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example shows a simple map/reduce job that reads from an accumulo table and
-writes back into that table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into accumulo using the accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert a-row cf cq value
-    username@instance> insert b-row cf cq value
-    username@instance> quit
-
-The RowHash class will insert a hash for each row in the database if it contains a
-specified column. Here's how to run the map/reduce job:
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.RowHash -u user -p passwd -i instance -t input --column cf:cq
-
-Now we can scan the table and see the hashes:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> scan -t input
-    a-row cf:cq []    value
-    a-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    b-row cf:cq []    value
-    b-row cf-HASHTYPE:cq-MD5BASE64 []    IGPBYI1uC6+AJJxC4r5YBA==
-    username@instance>
-
diff --git a/1.9/examples/sample.html b/1.9/examples/sample.html
new file mode 100644
index 0000000..7cfa192
--- /dev/null
+++ b/1.9/examples/sample.html
@@ -0,0 +1,347 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<meta charset="utf-8">
+<meta http-equiv="X-UA-Compatible" content="IE=edge">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link href="https://maxcdn.bootstrapcdn.com/bootswatch/3.3.7/paper/bootstrap.min.css" rel="stylesheet" integrity="sha384-awusxf8AUojygHf2+joICySzB780jVvQaVCAt1clU3QsyAitLGul28Qxb2r1e5g+" crossorigin="anonymous">
+<link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
+<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
+<link href="/css/accumulo.css" rel="stylesheet" type="text/css">
+
+<title>Apache Accumulo Sampling Example</title>
+
+<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
+<script type="text/javascript" src="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.js"></script>
+<script>
+  // show location of canonical site if not currently on the canonical site
+  $(function() {
+    var host = window.location.host;
+    if (typeof host !== 'undefined' && host !== 'accumulo.apache.org') {
+      $('#non-canonical').show();
+    }
+  });
+
+  $(function() {
+    // decorate section headers with anchors
+    return $("h2, h3, h4, h5, h6").each(function(i, el) {
+      var $el, icon, id;
+      $el = $(el);
+      id = $el.attr('id');
+      icon = '<i class="fa fa-link"></i>';
+      if (id) {
+        return $el.append($("<a />").addClass("header-link").attr("href", "#" + id).html(icon));
+      }
+    });
+  });
+
+  // fix sidebar width in documentation
+  $(function() {
+    var $affixElement = $('div[data-spy="affix"]');
+    $affixElement.width($affixElement.parent().width());
+  });
+</script>
+
+</head>
+<body style="padding-top: 100px">
+
+  <nav class="navbar navbar-default navbar-fixed-top">
+  <div class="container">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#navbar-items">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a href="/"><img id="nav-logo" alt="Apache Accumulo" class="img-responsive" src="/images/accumulo-logo.png" width="200"
+        /></a>
+    </div>
+    <div class="collapse navbar-collapse" id="navbar-items">
+      <ul class="nav navbar-nav">
+        <li class="nav-link"><a href="/downloads">Download</a></li>
+        <li class="nav-link"><a href="/tour">Tour</a></li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Releases<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/release/accumulo-2.0.0-alpha-2/">2.0.0-alpha-2 (Preview Release)</a></li>
+            <li><a href="/release/accumulo-1.9.3/">1.9.3 (Latest)</a></li>
+            <li><a href="/release/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Documentation<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/docs/2.x/getting-started/quickstart">User Manual (2.x)</a></li>
+            <li><a href="/quickstart-1.x">Quickstart (1.x)</a></li>
+            <li><a href="/1.9/accumulo_user_manual.html">User Manual (1.9)</a></li>
+            <li><a href="/1.9/apidocs">Javadocs (1.9)</a></li>
+            <li><a href="/external-docs">External Docs</a></li>
+            <li><a href="/docs-archive/">Archive</a></li>
+          </ul>
+        </li>
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#">Community<span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="/contact-us">Contact Us</a></li>
+            <li><a href="/how-to-contribute">How To Contribute</a></li>
+            <li><a href="/people">People</a></li>
+            <li><a href="/related-projects">Related Projects</a></li>
+          </ul>
+        </li>
+        <li class="nav-link"><a href="/search">Search</a></li>
+      </ul>
+      <ul class="nav navbar-nav navbar-right">
+        <li class="dropdown">
+          <a class="dropdown-toggle" data-toggle="dropdown" href="#"><img alt="Apache Software Foundation" src="https://www.apache.org/foundation/press/kit/feather.svg" width="15"/><span class="caret"></span></a>
+          <ul class="dropdown-menu">
+            <li><a href="https://www.apache.org">Apache Homepage <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/licenses/">License <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/sponsorship">Sponsorship <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/security">Security <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/thanks">Thanks <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/foundation/policies/conduct">Code of Conduct <i class="fa fa-external-link"></i></a></li>
+            <li><a href="https://www.apache.org/events/current-event.html">Current Event <i class="fa fa-external-link"></i></a></li>
+          </ul>
+        </li>
+      </ul>
+    </div>
+  </div>
+</nav>
+
+
+  <div class="container">
+    <div class="row">
+      <div class="col-md-12">
+
+        <div id="non-canonical" style="display: none; background-color: #F0E68C; padding-left: 1em;">
+          Visit the official site at: <a href="https://accumulo.apache.org">https://accumulo.apache.org</a>
+        </div>
+        <div id="content">
+          
+          <h1 class="title">Apache Accumulo Batch Writing and Scanning Example</h1>
+          
+          <h2 id="basic-sampling-example">Basic Sampling Example</h2>
+
+<p>Accumulo supports building a set of sample data that can be efficiently
+accessed by scanners.  What data is included in the sample set is configurable.
+Below, some data representing documents is inserted.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; createtable sampex
+root@instance sampex&gt; insert 9255 doc content 'abcde'
+root@instance sampex&gt; insert 9255 doc url file://foo.txt
+root@instance sampex&gt; insert 8934 doc content 'accumulo scales'
+root@instance sampex&gt; insert 8934 doc url file://accumulo_notes.txt
+root@instance sampex&gt; insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano'
+root@instance sampex&gt; insert 2317 doc url file://groceries/9.txt
+root@instance sampex&gt; insert 3900 doc content 'EC2 ate my homework'
+root@instance sampex&gt; insert 3900 doc uril file://final_project.txt
+</code></pre></div></div>
+
+<p>Below, the table sampex is configured to build a sample set.  The configuration
+causes Accumulo to include any row where <code class="highlighter-rouge">murmur3_32(row) % 3 == 0</code> in the
+table’s sample data.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; config -t sampex -s table.sampler.opt.hasher=murmur3_32
+root@instance sampex&gt; config -t sampex -s table.sampler.opt.modulus=3
+root@instance sampex&gt; config -t sampex -s table.sampler=org.apache.accumulo.core.client.sample.RowSampler
+</code></pre></div></div>
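+
+<p>The same sampler configuration can also be applied from Java when creating a
+table.  Below is a minimal sketch assuming an existing Connector named
+<code class="highlighter-rouge">conn</code>; SamplerConfiguration and NewTableConfiguration are part of the 1.8+
+client API, but double-check the method names against your version.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.HashMap;
+import java.util.Map;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.sample.RowSampler;
+import org.apache.accumulo.core.client.sample.SamplerConfiguration;
+
+// Configure row-based sampling (murmur3_32(row) % 3 == 0) at table creation time.
+Map&lt;String,String&gt; opts = new HashMap&lt;&gt;();
+opts.put("hasher", "murmur3_32");
+opts.put("modulus", "3");
+SamplerConfiguration samplerConfig =
+    new SamplerConfiguration(RowSampler.class.getName()).setOptions(opts);
+conn.tableOperations().create("sampex",
+    new NewTableConfiguration().enableSampling(samplerConfig));
+</code></pre></div></div>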
+
+<p>Below, attempting to scan the sample returns an error.  This is because data
+was inserted before the sample set was configured.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; scan --sample
+2015-09-09 12:21:50,643 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
+</code></pre></div></div>
+
+<p>To remedy this problem, the following command will flush in-memory data and
+compact any files that do not contain the correct sample data.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; compact -t sampex --sf-no-sample
+</code></pre></div></div>
+
+<p>After the compaction, the sample scan works.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; scan --sample
+2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
+2317 doc:url []    file://groceries/9.txt
+</code></pre></div></div>
+
+<p>The commands below show that updates to data in the sample are seen when
+scanning the sample.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano, butter'
+root@instance sampex&gt; scan --sample
+2317 doc:content []    milk, eggs, bread, parmigiano-reggiano, butter
+2317 doc:url []    file://groceries/9.txt
+</code></pre></div></div>
+
+<p>In order to make scanning the sample fast, sample data is partitioned as data is
+written to Accumulo.  This means that if the sample configuration is changed,
+data written previously was partitioned using different criteria.  Accumulo
+will detect this situation and fail sample scans.  The commands below show this
+failure and how to fix the problem with a compaction.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; config -t sampex -s table.sampler.opt.modulus=2
+root@instance sampex&gt; scan --sample
+2015-09-09 12:22:51,058 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
+root@instance sampex&gt; compact -t sampex --sf-no-sample
+2015-09-09 12:23:07,242 [shell.Shell] INFO : Compaction of table sampex started for given range
+root@instance sampex&gt; scan --sample
+2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
+2317 doc:url []    file://groceries/9.txt
+3900 doc:content []    EC2 ate my homework
+3900 doc:uril []    file://final_project.txt
+9255 doc:content []    abcde
+9255 doc:url []    file://foo.txt
+</code></pre></div></div>
+
+<p>The example above is replicated in a Java program using the Accumulo API.
+Below is the program name and the command to run it.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>./bin/accumulo org.apache.accumulo.examples.simple.sample.SampleExample -i instance -z localhost -u root -p secret
+</code></pre></div></div>
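+
+<p>Inside that program, the sample scan amounts to setting a sampler
+configuration on a scanner.  A minimal hedged sketch, again assuming an
+existing Connector named <code class="highlighter-rouge">conn</code> and options matching the table’s current
+configuration:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Map.Entry;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.sample.RowSampler;
+import org.apache.accumulo.core.client.sample.SamplerConfiguration;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+
+// Scan only the sample set, mirroring 'scan --sample' in the shell.  The
+// options must match the table's sampler configuration exactly.
+SamplerConfiguration samplerConfig = new SamplerConfiguration(RowSampler.class.getName());
+samplerConfig.addOption("hasher", "murmur3_32");
+samplerConfig.addOption("modulus", "3");  // use the table's current modulus
+
+Scanner scanner = conn.createScanner("sampex", Authorizations.EMPTY);
+scanner.setSamplerConfiguration(samplerConfig);
+for (Entry&lt;Key,Value&gt; entry : scanner) {
+  // Throws SampleNotPresentException if the sample is not built.
+  System.out.println(entry.getKey() + " -&gt; " + entry.getValue());
+}
+</code></pre></div></div>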
+
+<p>The commands below look under the hood to give some insight into how this
+feature works.  They determine which files the sampex table is using.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance sampex&gt; tables -l
+accumulo.metadata    =&gt;        !0
+accumulo.replication =&gt;      +rep
+accumulo.root        =&gt;        +r
+sampex               =&gt;         2
+trace                =&gt;         1
+root@instance sampex&gt; scan -t accumulo.metadata -c file -b 2 -e 2&lt;
+2&lt; file:hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf []    702,8
+</code></pre></div></div>
+
+<p>Below shows running <code class="highlighter-rouge">accumulo rfile-info</code> on the file above.  This shows that the
+rfile has a normal default locality group and a sample default locality group.
+The output also shows the configuration used to create the sample locality
+group.  The sample configuration within an rfile must match the table’s sample
+configuration for sample scans to work.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo rfile-info hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
+Reading file: hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
+RFile Version            : 8
+
+Locality group           : &lt;DEFAULT&gt;
+	Start block            : 0
+	Num   blocks           : 1
+	Index level 0          : 35 bytes  1 blocks
+	First key              : 2317 doc:content [] 1437672014986 false
+	Last key               : 9255 doc:url [] 1437672014875 false
+	Num entries            : 8
+	Column families        : [doc]
+
+Sample Configuration     :
+	Sampler class          : org.apache.accumulo.core.client.sample.RowSampler
+	Sampler options        : {hasher=murmur3_32, modulus=2}
+
+Sample Locality group    : &lt;DEFAULT&gt;
+	Start block            : 0
+	Num   blocks           : 1
+	Index level 0          : 36 bytes  1 blocks
+	First key              : 2317 doc:content [] 1437672014986 false
+	Last key               : 9255 doc:url [] 1437672014875 false
+	Num entries            : 6
+	Column families        : [doc]
+
+Meta block     : BCFile.index
+      Raw size             : 4 bytes
+      Compressed size      : 12 bytes
+      Compression type     : gz
+
+Meta block     : RFile.index
+      Raw size             : 309 bytes
+      Compressed size      : 176 bytes
+      Compression type     : gz
+</code></pre></div></div>
+
+<h2 id="shard-sampling-example">Shard Sampling Example</h2>
+
+<p><code class="highlighter-rouge">README.shard</code> shows how to index and search files using Accumulo.  That
+example indexes documents into a table named <code class="highlighter-rouge">shard</code>.  The indexing scheme used
+in that example places the document name in the column qualifier.  A useful
+sample of this indexing scheme should contain all data for any document in the
+sample.   To accomplish this, the following commands build a sample for the
+shard table based on the column qualifier.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance shard&gt; config -t shard -s table.sampler.opt.hasher=murmur3_32
+root@instance shard&gt; config -t shard -s table.sampler.opt.modulus=101
+root@instance shard&gt; config -t shard -s table.sampler.opt.qualifier=true
+root@instance shard&gt; config -t shard -s table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
+root@instance shard&gt; compact -t shard --sf-no-sample -w
+2015-07-23 15:00:09,280 [shell.Shell] INFO : Compacting table ...
+2015-07-23 15:00:10,134 [shell.Shell] INFO : Compaction of table shard completed for given range
+</code></pre></div></div>
+
+<p>After enabling sampling, the command below counts the number of documents in
+the sample containing the words <code class="highlighter-rouge">import</code> and <code class="highlighter-rouge">int</code>.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sample -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
+     11      11    1246
+</code></pre></div></div>
+
+<p>The command below counts the total number of documents containing the words
+<code class="highlighter-rouge">import</code> and <code class="highlighter-rouge">int</code>.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
+   1085    1085  118175
+</code></pre></div></div>
+
+<p>The counts, 11 in the sample out of 1085 total, are about what would be expected
+for a modulus of 101, since 1085 / 101 ≈ 10.7.  Querying the sample first
+provides a quick way to estimate how much data the real query will bring back.</p>
+
+<p>Another way sample data can be used with the shard example is with a
+specialized iterator.  The examples source code contains an iterator named
+CutoffIntersectingIterator.  This iterator first checks how many documents are
+found in the sample data; if too many are found, it returns nothing, otherwise
+it proceeds to query the full data set.  To experiment with this iterator, use
+the following command.  The <code class="highlighter-rouge">--sampleCutoff</code> option below causes the query to
+return nothing if, based on the sample, it appears the query would return more
+than 1000 documents.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sampleCutoff 1000 -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
+</code></pre></div></div>
+
+        </div>
+
+        
+<footer>
+
+  <p><a href="https://www.apache.org/foundation/contributing"><img src="https://www.apache.org/images/SupportApache-small.png" alt="Support the ASF" id="asf-logo" height="100" /></a></p>
+
+  <p>Copyright © 2011-2019 <a href="https://www.apache.org">The Apache Software Foundation</a>.
+Licensed under the <a href="https://www.apache.org/licenses/">Apache License, Version 2.0</a>.</p>
+
+  <p>Apache®, the names of Apache projects and their logos, and the multicolor feather
+logo are registered trademarks or trademarks of The Apache Software Foundation
+in the United States and/or other countries.</p>
+
+</footer>
+
+
+      </div>
+    </div>
+  </div>
+</body>
+</html>
diff --git a/1.9/examples/sample.md b/1.9/examples/sample.md
deleted file mode 100644
index 3642cc6..0000000
--- a/1.9/examples/sample.md
+++ /dev/null
@@ -1,192 +0,0 @@
-Title: Apache Accumulo Batch Writing and Scanning Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-
-Basic Sampling Example
-----------------------
-
-Accumulo supports building a set of sample data that can be efficiently
-accessed by scanners.  What data is included in the sample set is configurable.
-Below, some data representing documents are inserted.  
-
-    root@instance sampex> createtable sampex
-    root@instance sampex> insert 9255 doc content 'abcde'
-    root@instance sampex> insert 9255 doc url file://foo.txt
-    root@instance sampex> insert 8934 doc content 'accumulo scales'
-    root@instance sampex> insert 8934 doc url file://accumulo_notes.txt
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano'
-    root@instance sampex> insert 2317 doc url file://groceries/9.txt
-    root@instance sampex> insert 3900 doc content 'EC2 ate my homework'
-    root@instance sampex> insert 3900 doc uril file://final_project.txt
-
-Below, the table sampex is configured to build a sample set.  The configuration
-causes Accumulo to include any row where `murmur3_32(row) % 3 == 0` in the
-table's sample data.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.hasher=murmur3_32
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=3
-    root@instance sampex> config -t sampex -s table.sampler=org.apache.accumulo.core.client.sample.RowSampler
-
-Below, attempting to scan the sample returns an error.  This is because data
-was inserted before the sample set was configured.
-
-    root@instance sampex> scan --sample
-    2015-09-09 12:21:50,643 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-
-To remedy this problem, the following command will flush in-memory data and
-compact any files that do not contain the correct sample data.
-
-    root@instance sampex> compact -t sampex --sf-no-sample
-
-After the compaction, the sample scan works.  
-
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-
-The commands below show that updates to data in the sample are seen when
-scanning the sample.
-
-    root@instance sampex> insert 2317 doc content 'milk, eggs, bread, parmigiano-reggiano, butter'
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano, butter
-    2317 doc:url []    file://groceries/9.txt
-
-In order to make scanning the sample fast, sample data is partitioned as data is
-written to Accumulo.  This means that if the sample configuration is changed,
-data written previously was partitioned using different criteria.  Accumulo
-will detect this situation and fail sample scans.  The commands below show this
-failure and how to fix the problem with a compaction.
-
-    root@instance sampex> config -t sampex -s table.sampler.opt.modulus=2
-    root@instance sampex> scan --sample
-    2015-09-09 12:22:51,058 [shell.Shell] ERROR: org.apache.accumulo.core.client.SampleNotPresentException: Table sampex(ID:2) does not have sampling configured or built
-    root@instance sampex> compact -t sampex --sf-no-sample
-    2015-09-09 12:23:07,242 [shell.Shell] INFO : Compaction of table sampex started for given range
-    root@instance sampex> scan --sample
-    2317 doc:content []    milk, eggs, bread, parmigiano-reggiano
-    2317 doc:url []    file://groceries/9.txt
-    3900 doc:content []    EC2 ate my homework
-    3900 doc:uril []    file://final_project.txt
-    9255 doc:content []    abcde
-    9255 doc:url []    file://foo.txt
-
-The example above is replicated in a Java program using the Accumulo API.
-Below is the program name and the command to run it.
-
-    ./bin/accumulo org.apache.accumulo.examples.simple.sample.SampleExample -i instance -z localhost -u root -p secret
-
-The commands below look under the hood to give some insight into how this
-feature works.  The commands determine what files the sampex table is using.
-
-    root@instance sampex> tables -l
-    accumulo.metadata    =>        !0
-    accumulo.replication =>      +rep
-    accumulo.root        =>        +r
-    sampex               =>         2
-    trace                =>         1
-    root@instance sampex> scan -t accumulo.metadata -c file -b 2 -e 2<
-    2< file:hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf []    702,8
-
-Below shows running `accumulo rfile-info` on the file above.  This shows the
-rfile has a normal default locality group and a sample default locality group.
-The output also shows the configuration used to create the sample locality
-group.  The sample configuration within an rfile must match the table's sample
-configuration for sample scans to work.
-
-    $ ./bin/accumulo rfile-info hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    Reading file: hdfs://localhost:10000/accumulo/tables/2/default_tablet/A000000s.rf
-    RFile Version            : 8
-    
-    Locality group           : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 35 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 8
-    	Column families        : [doc]
-    
-    Sample Configuration     :
-    	Sampler class          : org.apache.accumulo.core.client.sample.RowSampler
-    	Sampler options        : {hasher=murmur3_32, modulus=2}
-
-    Sample Locality group    : <DEFAULT>
-    	Start block            : 0
-    	Num   blocks           : 1
-    	Index level 0          : 36 bytes  1 blocks
-    	First key              : 2317 doc:content [] 1437672014986 false
-    	Last key               : 9255 doc:url [] 1437672014875 false
-    	Num entries            : 6
-    	Column families        : [doc]
-    
-    Meta block     : BCFile.index
-          Raw size             : 4 bytes
-          Compressed size      : 12 bytes
-          Compression type     : gz
-
-    Meta block     : RFile.index
-          Raw size             : 309 bytes
-          Compressed size      : 176 bytes
-          Compression type     : gz
-
-
-Shard Sampling Example
--------------------------
-
-`README.shard` shows how to index and search files using Accumulo.  That
-example indexes documents into a table named `shard`.  The indexing scheme used
-in that example places the document name in the column qualifier.  A useful
-sample of this indexing scheme should contain all data for any document in the
-sample.   To accomplish this, the following commands build a sample for the
-shard table based on the column qualifier.
-
-    root@instance shard> config -t shard -s table.sampler.opt.hasher=murmur3_32
-    root@instance shard> config -t shard -s table.sampler.opt.modulus=101
-    root@instance shard> config -t shard -s table.sampler.opt.qualifier=true
-    root@instance shard> config -t shard -s table.sampler=org.apache.accumulo.core.client.sample.RowColumnSampler
-    root@instance shard> compact -t shard --sf-no-sample -w
-    2015-07-23 15:00:09,280 [shell.Shell] INFO : Compacting table ...
-    2015-07-23 15:00:10,134 [shell.Shell] INFO : Compaction of table shard completed for given range
-
-After enabling sampling, the command below counts the number of documents in
-the sample containing the words `import` and `int`.     
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sample -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-         11      11    1246
-
-The command below counts the total number of documents containing the words
-`import` and `int`.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
-       1085    1085  118175
-
-The counts, 11 in the sample out of 1085 total, are about what would be expected
-for a modulus of 101.  Querying the sample first provides a quick way to estimate
-how much data the real query will bring back.
-
-Another way sample data can be used with the shard example is with a
-specialized iterator.  The examples source code contains an iterator named
-CutoffIntersectingIterator.  This iterator first checks how many documents are
-found in the sample data; if too many are found, it returns nothing, otherwise
-it proceeds to query the full data set.  To experiment with this iterator, use
-the following command.  The `--sampleCutoff` option below causes the query to
-return nothing if, based on the sample, it appears the query would return more
-than 1000 documents.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query --sampleCutoff 1000 -i instance16 -z localhost -t shard -u root -p secret import int | fgrep '.java' | wc
diff --git a/1.8/examples/index.html b/1.9/examples/shard.html
similarity index 63%
copy from 1.8/examples/index.html
copy to 1.9/examples/shard.html
index 3af64b6..0b6c677 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/shard.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Shard Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,64 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Shard Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>Accumulo has an iterator called the intersecting iterator which supports querying a term index that is partitioned by
+document, or “sharded”. This example shows how to use the intersecting iterator through these four programs:</p>
+
+<ul>
+  <li>Index.java - Indexes a set of text files into an Accumulo table.</li>
+  <li>Query.java - Finds documents containing a given set of terms.</li>
+  <li>Reverse.java - Reads the index table and writes a map of documents to terms into another table.</li>
+  <li>ContinuousQuery.java - Uses the table populated by Reverse.java to select N random terms per document, then continuously and randomly queries those terms.</li>
+</ul>
+
+<p>To run these example programs, create two tables like below.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance&gt; createtable shard
+username@instance shard&gt; createtable doc2term
+</code></pre></div></div>
+
+<p>After creating the tables, index some files. The following command indexes all of the Java files in the Accumulo source code.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ cd /local/username/workspace/accumulo/
+$ find core/src server/src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i instance -z zookeepers -t shard -u username -p password --partitions 30
+</code></pre></div></div>
+
+<p>The following command queries the index to find all files containing ‘foo’ and ‘bar’.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ cd $ACCUMULO_HOME
+$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance -z zookeepers -t shard -u username -p password foo bar
+/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
+/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
+/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
+/local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/RowDeleteTest.java
+/local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/logger/TestLogWriter.java
+/local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/DeleteEverythingTest.java
+/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/data/KeyExtentTest.java
+/local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/constraints/MetadataConstraintsTest.java
+/local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/iterators/WholeRowIteratorTest.java
+/local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/util/DefaultMapTest.java
+/local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/tabletserver/InMemoryMapTest.java
+</code></pre></div></div>
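+
+<p>Under the covers, Query.java configures the intersecting iterator on a batch
+scanner.  A minimal hedged sketch of that pattern, assuming an existing
+Connector named <code class="highlighter-rouge">conn</code>:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import java.util.Collections;
+import java.util.Map.Entry;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.user.IntersectingIterator;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+
+// Find documents whose index entries contain both terms 'foo' and 'bar'.
+BatchScanner bs = conn.createBatchScanner("shard", Authorizations.EMPTY, 10);
+IteratorSetting iterSetting = new IteratorSetting(20, "ii", IntersectingIterator.class);
+IntersectingIterator.setColumnFamilies(iterSetting, new Text[] {new Text("foo"), new Text("bar")});
+bs.addScanIterator(iterSetting);
+bs.setRanges(Collections.singleton(new Range()));
+for (Entry&lt;Key,Value&gt; entry : bs) {
+  // Matching document ids come back in the column qualifier.
+  System.out.println(entry.getKey().getColumnQualifier());
+}
+bs.close();
+</code></pre></div></div>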
+
+<p>In order to run ContinuousQuery, we need to run Reverse.java to populate doc2term.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password
+</code></pre></div></div>
+
+<p>Below, ContinuousQuery is run using 5 terms, so it selects 5 random terms from each document, then continually
+and randomly selects one document’s set of 5 terms and queries for them. It prints the number of matching documents and the query time in seconds.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password --terms 5
+[public, core, class, binarycomparable, b] 2  0.081
+[wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
+[import, columnvisibilityinterpreterfactory, illegalstateexception, cv, columnvisibility] 1  0.049
+[getpackage, testversion, util, version, 55] 1  0.048
+[for, static, println, public, the] 55  0.211
+[sleeptime, wrappingiterator, options, long, utilwaitthread] 1  0.057
+[string, public, long, 0, wait] 12  0.132
+</code></pre></div></div>
 
         </div>
 
diff --git a/1.9/examples/shard.md b/1.9/examples/shard.md
deleted file mode 100644
index d08658a..0000000
--- a/1.9/examples/shard.md
+++ /dev/null
@@ -1,67 +0,0 @@
-Title: Apache Accumulo Shard Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-Accumulo has an iterator called the intersecting iterator which supports querying a term index that is partitioned by
-document, or "sharded". This example shows how to use the intersecting iterator through these four programs:
-
- * Index.java - Indexes a set of text files into an Accumulo table
- * Query.java - Finds documents containing a given set of terms.
- * Reverse.java - Reads the index table and writes a map of documents to terms into another table.
- * ContinuousQuery.java - Uses the table populated by Reverse.java to select N random terms per document. Then it continuously and randomly queries those terms.
-
-To run these example programs, create two tables like below.
-
-    username@instance> createtable shard
-    username@instance shard> createtable doc2term
-
-After creating the tables, index some files. The following command indexes all of the java files in the Accumulo source code.
-
-    $ cd /local/username/workspace/accumulo/
-    $ find core/src server/src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i instance -z zookeepers -t shard -u username -p password --partitions 30
-
-The following command queries the index to find all files containing 'foo' and 'bar'.
-
-    $ cd $ACCUMULO_HOME
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Query -i instance -z zookeepers -t shard -u username -p password foo bar
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/ColumnVisibilityTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/client/mock/MockConnectorTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/security/VisibilityEvaluatorTest.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/RowDeleteTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/logger/TestLogWriter.java
-    /local/username/workspace/accumulo/src/server/src/main/java/accumulo/test/functional/DeleteEverythingTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/data/KeyExtentTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/constraints/MetadataConstraintsTest.java
-    /local/username/workspace/accumulo/src/core/src/test/java/accumulo/core/iterators/WholeRowIteratorTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/util/DefaultMapTest.java
-    /local/username/workspace/accumulo/src/server/src/test/java/accumulo/server/tabletserver/InMemoryMapTest.java
-
-In order to run ContinuousQuery, we need to run Reverse.java to populate doc2term.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.Reverse -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password
-
-Below, ContinuousQuery is run with 5 terms. It first selects 5 random terms from each document, then repeatedly
-picks one random set of 5 terms and queries for it, printing the number of matching documents and the query time in seconds.
-
-    $ ./bin/accumulo org.apache.accumulo.examples.simple.shard.ContinuousQuery -i instance -z zookeepers --shardTable shard --doc2Term doc2term -u username -p password --terms 5
-    [public, core, class, binarycomparable, b] 2  0.081
-    [wordtodelete, unindexdocument, doctablename, putdelete, insert] 1  0.041
-    [import, columnvisibilityinterpreterfactory, illegalstateexception, cv, columnvisibility] 1  0.049
-    [getpackage, testversion, util, version, 55] 1  0.048
-    [for, static, println, public, the] 55  0.211
-    [sleeptime, wrappingiterator, options, long, utilwaitthread] 1  0.057
-    [string, public, long, 0, wait] 12  0.132
diff --git a/1.8/examples/index.html b/1.9/examples/tabletofile.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/tabletofile.html
index 3af64b6..91941fd 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/tabletofile.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Table-to-File Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,51 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Table-to-File Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <p>This example uses MapReduce to extract specified columns from an existing table.</p>
+
+<p>To run this example you will need some data in a table. The following will
+put a trivial amount of data into Accumulo using the Accumulo shell:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+Shell - Apache Accumulo Interactive Shell
+- version: 1.5.0
+- instance name: instance
+- instance id: 00000000-0000-0000-0000-000000000000
+-
+- type 'help' for a list of available commands
+-
+username@instance&gt; createtable input
+username@instance&gt; insert dog cf cq dogvalue
+username@instance&gt; insert cat cf cq catvalue
+username@instance&gt; insert junk family qualifier junkvalue
+username@instance&gt; quit
+</code></pre></div></div>
+
+<p>The TableToFile class configures a map-only job to read the specified columns and
+write the key/value pairs to a file in HDFS.</p>
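+
+<p>A rough sketch of how such a map-only job is wired up with AccumuloInputFormat follows (illustrative only,
+with imports and error handling omitted; the real TableToFile also supplies a small mapper that formats each
+entry before it is written):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Job job = Job.getInstance(new Configuration());
+job.setJobName("TableToFile");
+job.setInputFormatClass(AccumuloInputFormat.class);
+AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("passwd"));
+AccumuloInputFormat.setZooKeeperInstance(job,
+    ClientConfiguration.loadDefault().withInstance("instance").withZkHosts("zookeepers"));
+AccumuloInputFormat.setInputTableName(job, "input");
+// Restrict the scan to the requested column.
+AccumuloInputFormat.fetchColumns(job,
+    Collections.singleton(new Pair&lt;Text,Text&gt;(new Text("cf"), new Text("cq"))));
+job.setNumReduceTasks(0); // map-only
+job.setOutputKeyClass(Key.class);
+job.setOutputValueClass(Value.class);
+job.setOutputFormatClass(TextOutputFormat.class);
+TextOutputFormat.setOutputPath(job, new Path("/tmp/output"));
+job.waitForCompletion(true);
+</code></pre></div></div>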
+
+<p>The following will extract the rows containing the column “cf:cq”:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
+
+$ hadoop fs -ls /tmp/output
+-rw-r--r--   1 username supergroup          0 2013-01-10 14:44 /tmp/output/_SUCCESS
+drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs
+drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs/history
+-rw-r--r--   1 username supergroup       9049 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_1357847072863_username_TableToFile%5F1357847071434
+-rw-r--r--   1 username supergroup      26172 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_conf.xml
+-rw-r--r--   1 username supergroup         50 2013-01-10 14:44 /tmp/output/part-m-00000
+</code></pre></div></div>
+
+<p>We can see the output of our little map-reduce job:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ hadoop fs -text /tmp/output/output/part-m-00000
+catrow cf:cq []	catvalue
+dogrow cf:cq []	dogvalue
+$
+</code></pre></div></div>
 
 
         </div>
diff --git a/1.9/examples/tabletofile.md b/1.9/examples/tabletofile.md
deleted file mode 100644
index 08b7cc9..0000000
--- a/1.9/examples/tabletofile.md
+++ /dev/null
@@ -1,59 +0,0 @@
-Title: Apache Accumulo Table-to-File Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses MapReduce to extract specified columns from an existing table.
-
-To run this example you will need some data in a table. The following will
-put a trivial amount of data into Accumulo using the Accumulo shell:
-
-    $ ./bin/accumulo shell -u username -p password
-    Shell - Apache Accumulo Interactive Shell
-    - version: 1.5.0
-    - instance name: instance
-    - instance id: 00000000-0000-0000-0000-000000000000
-    -
-    - type 'help' for a list of available commands
-    -
-    username@instance> createtable input
-    username@instance> insert dog cf cq dogvalue
-    username@instance> insert cat cf cq catvalue
-    username@instance> insert junk family qualifier junkvalue
-    username@instance> quit
-
-The TableToFile class configures a map-only job to read the specified columns and
-write the key/value pairs to a file in HDFS.
-
-The following will extract the rows containing the column "cf:cq":
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TableToFile -u user -p passwd -i instance -t input --columns cf:cq --output /tmp/output
-
-    $ hadoop fs -ls /tmp/output
-    -rw-r--r--   1 username supergroup          0 2013-01-10 14:44 /tmp/output/_SUCCESS
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs
-    drwxr-xr-x   - username supergroup          0 2013-01-10 14:44 /tmp/output/_logs/history
-    -rw-r--r--   1 username supergroup       9049 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_1357847072863_username_TableToFile%5F1357847071434
-    -rw-r--r--   1 username supergroup      26172 2013-01-10 14:44 /tmp/output/_logs/history/job_201301081658_0011_conf.xml
-    -rw-r--r--   1 username supergroup         50 2013-01-10 14:44 /tmp/output/part-m-00000
-
-We can see the output of our little map-reduce job:
-
-    $ hadoop fs -text /tmp/output/part-m-00000
-    catrow cf:cq []	catvalue
-    dogrow cf:cq []	dogvalue
-    $
-
diff --git a/1.8/examples/index.html b/1.9/examples/terasort.html
similarity index 66%
copy from 1.8/examples/index.html
copy to 1.9/examples/terasort.html
index 3af64b6..971567c 100644
--- a/1.8/examples/index.html
+++ b/1.9/examples/terasort.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Terasort Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,90 +136,42 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Terasort Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
-
+          <p>This example uses map/reduce to generate random input data that is
+sorted simply by being stored in Accumulo. It uses data very similar to the
+Hadoop terasort benchmark.</p>
+
+<p>Run this example with arguments describing the amount of data to generate:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
+-i instance -z zookeepers -u user -p password \
+--count 10 \
+--minKeySize 10 \
+--maxKeySize 10 \
+--minValueSize 78 \
+--maxValueSize 78 \
+--table sort \
+--splits 10
+</code></pre></div></div>
+
+<p>After the map reduce job completes, scan the data:</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ ./bin/accumulo shell -u username -p password
+username@instance&gt; scan -t sort
++l-$$OE/ZH c:         4 []    GGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOO
+,C)wDw//u= c:        10 []    CCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKK
+75@~?'WdUF c:         1 []    IIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQ
+;L+!2rT~hd c:         8 []    MMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUU
+LsS8)|.ZLD c:         5 []    OOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWW
+M^*dDE;6^&lt; c:         9 []    UUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCC
+^Eu)&lt;n#kdP c:         3 []    YYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGG
+le5awB.$sm c:         6 []    WWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEE
+q__[fwhKFg c:         7 []    EEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMM
+w[o||:N&amp;H, c:         2 []    QQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYY
+</code></pre></div></div>
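+
+<p>Accumulo keeps keys sorted, so the “sort” happens as the data is ingested; any scan returns rows in order. The
+same check as the shell scan, sketched in Java (conn being a Connector for this instance):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Scanner scanner = conn.createScanner("sort", Authorizations.EMPTY);
+for (Map.Entry&lt;Key,Value&gt; entry : scanner) // entries arrive in sorted key order
+  System.out.println(entry.getKey().getRow() + " " + entry.getValue());
+</code></pre></div></div>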
+
+<p>Of course, a real benchmark would ingest millions of entries.</p>
 
         </div>
 
diff --git a/1.9/examples/terasort.md b/1.9/examples/terasort.md
deleted file mode 100644
index 409c1d1..0000000
--- a/1.9/examples/terasort.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Title: Apache Accumulo Terasort Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-This example uses map/reduce to generate random input data that is
-sorted simply by being stored in Accumulo. It uses data very similar to the
-Hadoop terasort benchmark.
-
-Run this example with arguments describing the amount of data to generate:
-
-    $ bin/tool.sh lib/accumulo-examples-simple.jar org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest \
-    -i instance -z zookeepers -u user -p password \
-    --count 10 \
-    --minKeySize 10 \
-    --maxKeySize 10 \
-    --minValueSize 78 \
-    --maxValueSize 78 \
-    --table sort \
-    --splits 10
-
-After the map reduce job completes, scan the data:
-
-    $ ./bin/accumulo shell -u username -p password
-    username@instance> scan -t sort
-    +l-$$OE/ZH c:         4 []    GGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOO
-    ,C)wDw//u= c:        10 []    CCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKK
-    75@~?'WdUF c:         1 []    IIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQ
-    ;L+!2rT~hd c:         8 []    MMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUU
-    LsS8)|.ZLD c:         5 []    OOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWW
-    M^*dDE;6^< c:         9 []    UUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCC
-    ^Eu)<n#kdP c:         3 []    YYYYYYYYYYOOOOOOOOOOEEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGG
-    le5awB.$sm c:         6 []    WWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYYYYOOOOOOOOOOEEEEEEEE
-    q__[fwhKFg c:         7 []    EEEEEEEEEEUUUUUUUUUUKKKKKKKKKKAAAAAAAAAAQQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMM
-    w[o||:N&H, c:         2 []    QQQQQQQQQQGGGGGGGGGGWWWWWWWWWWMMMMMMMMMMCCCCCCCCCCSSSSSSSSSSIIIIIIIIIIYYYYYYYY
-
-Of course, a real benchmark would ingest millions of entries.
diff --git a/1.9/examples/index.html b/1.9/examples/visibility.html
similarity index 54%
copy from 1.9/examples/index.html
copy to 1.9/examples/visibility.html
index 3af64b6..a36c3fc 100644
--- a/1.9/examples/index.html
+++ b/1.9/examples/visibility.html
@@ -25,7 +25,7 @@
 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs/jq-2.2.3/dt-1.10.12/datatables.min.css">
 <link href="/css/accumulo.css" rel="stylesheet" type="text/css">
 
-<title>Apache Accumulo Examples</title>
+<title>Apache Accumulo Visibility, Authorizations, and Permissions Example</title>
 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.2.4/jquery.min.js" integrity="sha256-BbhdlvQf/xTY9gja0Dq3HiwQF8LaCRTXxZKRutelT44=" crossorigin="anonymous"></script>
 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
@@ -136,89 +136,128 @@
         </div>
         <div id="content">
           
-          <h1 class="title">Apache Accumulo Examples</h1>
+          <h1 class="title">Apache Accumulo Visibility, Authorizations, and Permissions Example</h1>
           
-          <p>Before running any of the examples, the following steps must be performed.</p>
-
-<ol>
-  <li>
-    <p>Install and run Accumulo via the instructions found in $ACCUMULO_HOME/README.
-Remember the instance name. It will be referred to as “instance” throughout
-the examples. A comma-separated list of zookeeper servers will be referred
-to as “zookeepers”.</p>
-  </li>
-  <li>
-    <p>Create an Accumulo user (see the <a href="../accumulo_user_manual#_user_administration">user manual</a>), or use the root user.
-The “username” Accumulo user name with password “password” is used
-throughout the examples. This user needs the ability to create tables.</p>
-  </li>
-</ol>
-
-<p>In all commands, you will need to replace “instance”, “zookeepers”,
-“username”, and “password” with the values you set for your Accumulo instance.</p>
-
-<p>Commands intended to be run in bash are prefixed by ‘$’. These are always
-assumed to be run from the $ACCUMULO_HOME directory.</p>
-
-<p>Commands intended to be run in the Accumulo shell are prefixed by ‘&gt;’.</p>
-
-<p>Each README in the examples directory highlights the use of particular
-features of Apache Accumulo.</p>
-
-<p><a href="batch">batch</a>:       Using the batch writer and batch scanner.</p>
-
-<p><a href="bloom">bloom</a>:       Creating a bloom filter enabled table to increase query
-                       performance.</p>
-
-<p><a href="bulkIngest">bulkIngest</a>:  Ingesting bulk data using map/reduce jobs on Hadoop.</p>
-
-<p><a href="classpath">classpath</a>:   Using per-table classpaths.</p>
-
-<p><a href="client">client</a>:      Using table operations, reading and writing data in Java.</p>
-
-<p><a href="combiner">combiner</a>:    Using example StatsCombiner to find min, max, sum, and
-                       count.</p>
-
-<p><a href="constraints">constraints</a>: Using constraints with tables.</p>
-
-<p><a href="dirlist">dirlist</a>:     Storing filesystem information.</p>
-
-<p><a href="export">export</a>:      Exporting and importing tables.</p>
-
-<p><a href="filedata">filedata</a>:    Storing file data.</p>
-
-<p><a href="filter">filter</a>:      Using the AgeOffFilter to remove records more than 30
-                       seconds old.</p>
-
-<p><a href="helloworld">helloworld</a>:  Inserting records both inside map/reduce jobs and
-                       outside. And reading records between two rows.</p>
-
-<p><a href="isolation">isolation</a>:   Using the isolated scanner to ensure partial changes
-                       are not seen.</p>
-
-<p><a href="mapred">mapred</a>:      Using MapReduce to read from and write to Accumulo
-                       tables.</p>
-
-<p><a href="maxmutation">maxmutation</a>: Limiting mutation size to avoid running out of memory.</p>
-
-<p><a href="regex">regex</a>:       Using MapReduce and Accumulo to find data using regular
-                       expressions.</p>
-
-<p><a href="rowhash">rowhash</a>:     Using MapReduce to read a table and write to a new
-                       column in the same table.</p>
-
-<p><a href="sample">sample</a>:      Building and using sample data in Accumulo.</p>
-
-<p><a href="shard">shard</a>:       Using the intersecting iterator with a term index
-                       partitioned by document.</p>
-
-<p><a href="tabletofile">tabletofile</a>: Using MapReduce to read a table and write one of its
-                       columns to a file in HDFS.</p>
-
-<p><a href="terasort">terasort</a>:    Generating random data and sorting it using Accumulo.</p>
-
-<p><a href="visibility">visibility</a> :  Using visibilities (or combinations of authorizations).
-                       Also shows user permissions.</p>
+          <h2 id="creating-a-new-user">Creating a new user</h2>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@instance&gt; createuser username
+Enter new password for 'username': ********
+Please confirm new password for 'username': ********
+root@instance&gt; user username
+Enter password for user username: ********
+username@instance&gt; createtable vistest
+06 10:48:47,931 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
+username@instance&gt; userpermissions
+System permissions:
+
+Table permissions (accumulo.metadata): Table.READ
+username@instance&gt;
+</code></pre></div></div>
+
+<p>A user does not by default have permission to create a table.</p>
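+
+<p>The same administration is available through the Java API. A sketch, assuming a Connector conn authenticated
+as root (imports omitted):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// A newly created user starts with no system permissions and no authorizations.
+conn.securityOperations().createLocalUser("username", new PasswordToken("password"));
+</code></pre></div></div>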
+
+<h2 id="granting-permissions-to-a-user">Granting permissions to a user</h2>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance&gt; user root
+Enter password for user root: ********
+root@instance&gt; grant -s System.CREATE_TABLE -u username
+root@instance&gt; user username
+Enter password for user username: ********
+username@instance&gt; createtable vistest
+username@instance&gt; userpermissions
+System permissions: System.CREATE_TABLE
+
+Table permissions (accumulo.metadata): Table.READ
+Table permissions (vistest): Table.READ, Table.WRITE, Table.BULK_IMPORT, Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE
+username@instance vistest&gt;
+</code></pre></div></div>
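+
+<p>Programmatically, the equivalent grant is a single call on a root Connector (a sketch):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Table creation also gives the creator full permissions on the new table, as seen above.
+conn.securityOperations().grantSystemPermission("username", SystemPermission.CREATE_TABLE);
+</code></pre></div></div>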
+
+<h2 id="inserting-data-with-visibilities">Inserting data with visibilities</h2>
+
+<p>Visibilities are boolean AND (&amp;) and OR (|) combinations of authorization
+tokens. Authorization tokens are arbitrary strings taken from a restricted
+ASCII character set. Parentheses are required to specify order of operations
+in visibilities.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; insert row f1 q1 v1 -l A
+username@instance vistest&gt; insert row f2 q2 v2 -l A&amp;B
+username@instance vistest&gt; insert row f3 q3 v3 -l apple&amp;carrot|broccoli|spinach
+06 11:19:01,432 [shell.Shell] ERROR: org.apache.accumulo.core.util.BadArgumentException: cannot mix | and &amp; near index 12
+apple&amp;carrot|broccoli|spinach
+            ^
+username@instance vistest&gt; insert row f3 q3 v3 -l (apple&amp;carrot)|broccoli|spinach
+username@instance vistest&gt;
+</code></pre></div></div>
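+
+<p>From Java, a visibility is attached per entry with ColumnVisibility, which parses the same expression syntax
+the shell accepts (a sketch, with conn a Connector for this user):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>BatchWriter bw = conn.createBatchWriter("vistest", new BatchWriterConfig());
+Mutation m = new Mutation("row");
+// The expression is validated when parsed, just as the shell rejected the unparenthesized form.
+m.put("f3", "q3", new ColumnVisibility("(apple&amp;carrot)|broccoli|spinach"), "v3");
+bw.addMutation(m);
+bw.close();
+</code></pre></div></div>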
+
+<h2 id="scanning-with-authorizations">Scanning with authorizations</h2>
+
+<p>Authorizations are sets of authorization tokens. Each Accumulo user has
+authorizations and each Accumulo scan has authorizations. Scan authorizations
+are only allowed to be a subset of the user’s authorizations. By default, a
+user’s authorizations set is empty.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; scan
+username@instance vistest&gt; scan -s A
+06 11:43:14,951 [shell.Shell] ERROR: java.lang.RuntimeException: org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_AUTHORIZATIONS - The user does not have the specified authorizations assigned
+username@instance vistest&gt;
+</code></pre></div></div>
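+
+<p>In Java, scan authorizations are supplied when the scanner is created, and requesting authorizations the user
+does not hold fails the same way (a sketch):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Fails with BAD_AUTHORIZATIONS, as above, if A is not among the user's authorizations.
+Scanner scanner = conn.createScanner("vistest", new Authorizations("A"));
+for (Map.Entry&lt;Key,Value&gt; entry : scanner)
+  System.out.println(entry.getKey() + " " + entry.getValue());
+</code></pre></div></div>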
+
+<h2 id="setting-authorizations-for-a-user">Setting authorizations for a user</h2>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; setauths -s A
+06 11:53:42,056 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
+username@instance vistest&gt;
+</code></pre></div></div>
+
+<p>A user cannot set authorizations unless the user has the System.ALTER_USER permission.
+The root user has this permission.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; user root
+Enter password for user root: ********
+root@instance vistest&gt; setauths -s A -u username
+root@instance vistest&gt; user username
+Enter password for user username: ********
+username@instance vistest&gt; scan -s A
+row f1:q1 [A]    v1
+username@instance vistest&gt; scan
+row f1:q1 [A]    v1
+username@instance vistest&gt;
+</code></pre></div></div>
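+
+<p>The equivalent Java call, which likewise requires the System.ALTER_USER permission (a sketch using a root
+Connector conn):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>conn.securityOperations().changeUserAuthorizations("username", new Authorizations("A"));
+</code></pre></div></div>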
+
+<p>The default authorizations for a scan are the user’s entire set of authorizations.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; user root
+Enter password for user root: ********
+root@instance vistest&gt; setauths -s A,B,broccoli -u username
+root@instance vistest&gt; user username
+Enter password for user username: ********
+username@instance vistest&gt; scan
+row f1:q1 [A]    v1
+row f2:q2 [A&amp;B]    v2
+row f3:q3 [(apple&amp;carrot)|broccoli|spinach]    v3
+username@instance vistest&gt; scan -s B
+username@instance vistest&gt;
+</code></pre></div></div>
+
+<p>If you want, you can limit a user to inserting only data that they themselves can read.
+This is done by setting the following constraint.</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>username@instance vistest&gt; user root
+Enter password for user root: ******
+root@instance vistest&gt; config -t vistest -s table.constraint.1=org.apache.accumulo.core.security.VisibilityConstraint
+root@instance vistest&gt; user username
+Enter password for user username: ********
+username@instance vistest&gt; insert row f4 q4 v4 -l spinach
+    Constraint Failures:
+        ConstraintViolationSummary(constrainClass:org.apache.accumulo.core.security.VisibilityConstraint, violationCode:2, violationDescription:User does not have authorization on column visibility, numberOfViolatingMutations:1)
+username@instance vistest&gt; insert row f4 q4 v4 -l spinach|broccoli
+username@instance vistest&gt; scan
+row f1:q1 [A]    v1
+row f2:q2 [A&amp;B]    v2
+row f3:q3 [(apple&amp;carrot)|broccoli|spinach]    v3
+row f4:q4 [spinach|broccoli]    v4
+username@instance vistest&gt;
+</code></pre></div></div>
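+
+<p>The same constraint can also be set from Java through table operations (a sketch; the property name matches
+the shell command above):</p>
+
+<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>conn.tableOperations().setProperty("vistest",
+    "table.constraint.1", "org.apache.accumulo.core.security.VisibilityConstraint");
+</code></pre></div></div>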
 
 
         </div>
diff --git a/1.9/examples/visibility.md b/1.9/examples/visibility.md
deleted file mode 100644
index b766dba..0000000
--- a/1.9/examples/visibility.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Title: Apache Accumulo Visibility, Authorizations, and Permissions Example
-Notice:    Licensed to the Apache Software Foundation (ASF) under one
-           or more contributor license agreements.  See the NOTICE file
-           distributed with this work for additional information
-           regarding copyright ownership.  The ASF licenses this file
-           to you under the Apache License, Version 2.0 (the
-           "License"); you may not use this file except in compliance
-           with the License.  You may obtain a copy of the License at
-           .
-             http://www.apache.org/licenses/LICENSE-2.0
-           .
-           Unless required by applicable law or agreed to in writing,
-           software distributed under the License is distributed on an
-           "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-           KIND, either express or implied.  See the License for the
-           specific language governing permissions and limitations
-           under the License.
-
-## Creating a new user
-
-    root@instance> createuser username
-    Enter new password for 'username': ********
-    Please confirm new password for 'username': ********
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    06 10:48:47,931 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance> userpermissions
-    System permissions:
-
-    Table permissions (accumulo.metadata): Table.READ
-    username@instance>
-
-A user does not by default have permission to create a table.
-
-## Granting permissions to a user
-
-    username@instance> user root
-    Enter password for user root: ********
-    root@instance> grant -s System.CREATE_TABLE -u username
-    root@instance> user username
-    Enter password for user username: ********
-    username@instance> createtable vistest
-    username@instance> userpermissions
-    System permissions: System.CREATE_TABLE
-
-    Table permissions (accumulo.metadata): Table.READ
-    Table permissions (vistest): Table.READ, Table.WRITE, Table.BULK_IMPORT, Table.ALTER_TABLE, Table.GRANT, Table.DROP_TABLE
-    username@instance vistest>
-
-## Inserting data with visibilities
-
-Visibilities are boolean AND (&) and OR (|) combinations of authorization
-tokens. Authorization tokens are arbitrary strings taken from a restricted
-ASCII character set. Parentheses are required to specify order of operations
-in visibilities.
-
-    username@instance vistest> insert row f1 q1 v1 -l A
-    username@instance vistest> insert row f2 q2 v2 -l A&B
-    username@instance vistest> insert row f3 q3 v3 -l apple&carrot|broccoli|spinach
-    06 11:19:01,432 [shell.Shell] ERROR: org.apache.accumulo.core.util.BadArgumentException: cannot mix | and & near index 12
-    apple&carrot|broccoli|spinach
-                ^
-    username@instance vistest> insert row f3 q3 v3 -l (apple&carrot)|broccoli|spinach
-    username@instance vistest>
-
-## Scanning with authorizations
-
-Authorizations are sets of authorization tokens. Each Accumulo user has
-authorizations and each Accumulo scan has authorizations. Scan authorizations
-are only allowed to be a subset of the user's authorizations. By default, a
-user's authorizations set is empty.
-
-    username@instance vistest> scan
-    username@instance vistest> scan -s A
-    06 11:43:14,951 [shell.Shell] ERROR: java.lang.RuntimeException: org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_AUTHORIZATIONS - The user does not have the specified authorizations assigned
-    username@instance vistest>
-
-## Setting authorizations for a user
-
-    username@instance vistest> setauths -s A
-    06 11:53:42,056 [shell.Shell] ERROR: org.apache.accumulo.core.client.AccumuloSecurityException: Error PERMISSION_DENIED - User does not have permission to perform this action
-    username@instance vistest>
-
-A user cannot set authorizations unless the user has the System.ALTER_USER permission.
-The root user has this permission.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan -s A
-    row f1:q1 [A]    v1
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    username@instance vistest>
-
-The default authorizations for a scan are the user's entire set of authorizations.
-
-    username@instance vistest> user root
-    Enter password for user root: ********
-    root@instance vistest> setauths -s A,B,broccoli -u username
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    username@instance vistest> scan -s B
-    username@instance vistest>
-
-If you want, you can limit a user to inserting only data that they themselves can read.
-This is done by setting the following constraint.
-
-    username@instance vistest> user root
-    Enter password for user root: ******
-    root@instance vistest> config -t vistest -s table.constraint.1=org.apache.accumulo.core.security.VisibilityConstraint
-    root@instance vistest> user username
-    Enter password for user username: ********
-    username@instance vistest> insert row f4 q4 v4 -l spinach
-        Constraint Failures:
-            ConstraintViolationSummary(constrainClass:org.apache.accumulo.core.security.VisibilityConstraint, violationCode:2, violationDescription:User does not have authorization on column visibility, numberOfViolatingMutations:1)
-    username@instance vistest> insert row f4 q4 v4 -l spinach|broccoli
-    username@instance vistest> scan
-    row f1:q1 [A]    v1
-    row f2:q2 [A&B]    v2
-    row f3:q3 [(apple&carrot)|broccoli|spinach]    v3
-    row f4:q4 [spinach|broccoli]    v4
-    username@instance vistest>
-
diff --git a/feed.xml b/feed.xml
index 9614d7b..7f9dbb8 100644
--- a/feed.xml
+++ b/feed.xml
@@ -6,8 +6,8 @@
 </description>
     <link>https://accumulo.apache.org/</link>
     <atom:link href="https://accumulo.apache.org/feed.xml" rel="self" type="application/rss+xml"/>
-    <pubDate>Thu, 11 Apr 2019 20:09:10 -0400</pubDate>
-    <lastBuildDate>Thu, 11 Apr 2019 20:09:10 -0400</lastBuildDate>
+    <pubDate>Thu, 11 Apr 2019 21:44:37 -0400</pubDate>
+    <lastBuildDate>Thu, 11 Apr 2019 21:44:37 -0400</lastBuildDate>
     <generator>Jekyll v3.7.3</generator>
     
     
diff --git a/redirects.json b/redirects.json
index 47e1f2a..9c19363 100644
--- a/redirects.json
+++ b/redirects.json
@@ -1 +1 @@
-{"/release_notes/1.5.1.html":"https://accumulo.apache.org/release/accumulo-1.5.1/","/release_notes/1.6.0.html":"https://accumulo.apache.org/release/accumulo-1.6.0/","/release_notes/1.6.1.html":"https://accumulo.apache.org/release/accumulo-1.6.1/","/release_notes/1.6.2.html":"https://accumulo.apache.org/release/accumulo-1.6.2/","/release_notes/1.7.0.html":"https://accumulo.apache.org/release/accumulo-1.7.0/","/release_notes/1.5.3.html":"https://accumulo.apache.org/release/accumulo-1.5.3/" [...]
\ No newline at end of file
+{"/release_notes/1.5.1.html":"https://accumulo.apache.org/release/accumulo-1.5.1/","/release_notes/1.6.0.html":"https://accumulo.apache.org/release/accumulo-1.6.0/","/release_notes/1.6.1.html":"https://accumulo.apache.org/release/accumulo-1.6.1/","/release_notes/1.6.2.html":"https://accumulo.apache.org/release/accumulo-1.6.2/","/release_notes/1.7.0.html":"https://accumulo.apache.org/release/accumulo-1.7.0/","/release_notes/1.5.3.html":"https://accumulo.apache.org/release/accumulo-1.5.3/" [...]
\ No newline at end of file