Posted to commits@accumulo.apache.org by ct...@apache.org on 2016/01/09 04:38:17 UTC

[14/19] accumulo git commit: Merge branch 'javadoc-jdk8-1.6' into javadoc-jdk8-1.7

Merge branch 'javadoc-jdk8-1.6' into javadoc-jdk8-1.7

* Merge to the 1.7 branch, with additional javadoc fixes so the build works
* Prevent merging the maven-plugin-plugin version 3.4 specification (it applied only to the 1.6 branch)
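
Nearly all of the javadoc changes in this diff follow the same JDK8 doclint pattern: escape bare comparison operators as HTML entities and replace the XHTML-style <br /> with the HTML void tag <br>. An illustrative (not verbatim) before/after in Java:

    // Before: JDK8 doclint rejects the bare '>' and the self-closing tag
    /**
     * Truncates the string if len > maxLen.<br />
     */

    // After: entity-escape the operator and use the plain void tag
    /**
     * Truncates the string if len &gt; maxLen.<br>
     */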


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/6becfbd3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/6becfbd3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/6becfbd3

Branch: refs/heads/1.7
Commit: 6becfbd3852dc10f46658827d064f7d1e9ee6c45
Parents: d505843 c8c0cf7
Author: Christopher Tubbs <ct...@apache.org>
Authored: Fri Jan 8 22:04:57 2016 -0500
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Jan 8 22:04:57 2016 -0500

----------------------------------------------------------------------
 .../core/bloomfilter/DynamicBloomFilter.java    |  4 +--
 .../accumulo/core/client/BatchWriterConfig.java | 10 +++---
 .../core/client/ConditionalWriterConfig.java    |  4 +--
 .../client/mapred/AccumuloFileOutputFormat.java |  4 +--
 .../mapreduce/AccumuloFileOutputFormat.java     |  4 +--
 .../lib/impl/FileOutputConfigurator.java        |  4 +--
 .../lib/util/FileOutputConfigurator.java        |  4 +--
 .../security/tokens/AuthenticationToken.java    |  2 +-
 .../core/constraints/VisibilityConstraint.java  |  1 -
 .../java/org/apache/accumulo/core/data/Key.java |  2 +-
 .../org/apache/accumulo/core/data/Range.java    |  6 ++--
 .../file/blockfile/cache/CachedBlockQueue.java  |  2 +-
 .../core/file/blockfile/cache/ClassSize.java    |  4 +--
 .../accumulo/core/file/rfile/bcfile/Utils.java  | 35 +++++++++++---------
 .../user/WholeColumnFamilyIterator.java         |  4 +--
 .../core/metadata/ServicerForMetadataTable.java |  2 +-
 .../core/metadata/ServicerForRootTable.java     |  2 +-
 .../core/metadata/ServicerForUserTables.java    |  2 +-
 .../core/metadata/schema/MetadataSchema.java    |  2 +-
 .../core/replication/ReplicationSchema.java     |  6 ++--
 .../core/security/ColumnVisibility.java         |  8 ++---
 .../security/crypto/CryptoModuleParameters.java |  7 +---
 .../accumulo/core/conf/config-header.html       | 12 +++----
 .../examples/simple/filedata/ChunkCombiner.java | 18 +++++-----
 pom.xml                                         | 26 +++++++++++++++
 .../apache/accumulo/server/ServerConstants.java |  2 +-
 .../server/master/balancer/GroupBalancer.java   |  4 +--
 .../master/balancer/RegexGroupBalancer.java     |  6 ++--
 .../server/security/SecurityOperation.java      |  6 ++--
 .../server/security/UserImpersonation.java      |  2 +-
 .../server/security/SystemCredentialsTest.java  |  2 +-
 .../replication/SequentialWorkAssigner.java     |  2 +-
 .../monitor/servlets/DefaultServlet.java        |  2 +-
 .../monitor/servlets/ReplicationServlet.java    |  2 +-
 .../monitor/servlets/TablesServlet.java         |  4 +--
 .../tserver/compaction/CompactionStrategy.java  |  6 ++--
 .../test/replication/merkle/package-info.java   |  9 ++---
 .../replication/merkle/skvi/DigestIterator.java |  2 +-
 38 files changed, 124 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
index 6ceefad,320ecf4..3421f76
--- a/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/BatchWriterConfig.java
@@@ -49,10 -48,8 +49,10 @@@ public class BatchWriterConfig implemen
    private static final Integer DEFAULT_MAX_WRITE_THREADS = 3;
    private Integer maxWriteThreads = null;
  
 +  private Durability durability = Durability.DEFAULT;
 +
    /**
-    * Sets the maximum memory to batch before writing. The smaller this value, the more frequently the {@link BatchWriter} will write.<br />
+    * Sets the maximum memory to batch before writing. The smaller this value, the more frequently the {@link BatchWriter} will write.<br>
     * If set to a value smaller than a single mutation, then it will {@link BatchWriter#flush()} after each added mutation. Must be non-negative.
     *
     * <p>

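For context, a minimal usage sketch of the config documented above, assuming an existing Connector named conn and a table named "t" (both hypothetical):

    BatchWriterConfig cfg = new BatchWriterConfig()
        .setMaxMemory(10 * 1024 * 1024)  // smaller values flush more often
        .setMaxWriteThreads(3);          // matches DEFAULT_MAX_WRITE_THREADS
    BatchWriter bw = conn.createBatchWriter("t", cfg);
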
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/client/ConditionalWriterConfig.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloFileOutputFormat.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloFileOutputFormat.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/constraints/VisibilityConstraint.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/constraints/VisibilityConstraint.java
index 91bc22f,0000000..648d044
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/constraints/VisibilityConstraint.java
+++ b/core/src/main/java/org/apache/accumulo/core/constraints/VisibilityConstraint.java
@@@ -1,93 -1,0 +1,92 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.constraints;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.util.Collections;
 +import java.util.HashSet;
 +import java.util.List;
 +
 +import org.apache.accumulo.core.data.ColumnUpdate;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.accumulo.core.security.VisibilityEvaluator;
 +import org.apache.accumulo.core.security.VisibilityParseException;
 +import org.apache.accumulo.core.util.BadArgumentException;
 +
 +/**
 + * A constraint that checks the visibility of columns against the actor's authorizations. Violation codes:
-  * <p>
 + * <ul>
 + * <li>1 = failure to parse visibility expression</li>
 + * <li>2 = insufficient authorization</li>
 + * </ul>
 + */
 +public class VisibilityConstraint implements Constraint {
 +
 +  @Override
 +  public String getViolationDescription(short violationCode) {
 +    switch (violationCode) {
 +      case 1:
 +        return "Malformed column visibility";
 +      case 2:
 +        return "User does not have authorization on column visibility";
 +    }
 +
 +    return null;
 +  }
 +
 +  @Override
 +  public List<Short> check(Environment env, Mutation mutation) {
 +    List<ColumnUpdate> updates = mutation.getUpdates();
 +
 +    HashSet<String> ok = null;
 +    if (updates.size() > 1)
 +      ok = new HashSet<String>();
 +
 +    VisibilityEvaluator ve = null;
 +
 +    for (ColumnUpdate update : updates) {
 +
 +      byte[] cv = update.getColumnVisibility();
 +      if (cv.length > 0) {
 +        String key = null;
 +        if (ok != null && ok.contains(key = new String(cv, UTF_8)))
 +          continue;
 +
 +        try {
 +
 +          if (ve == null)
 +            ve = new VisibilityEvaluator(env.getAuthorizationsContainer());
 +
 +          if (!ve.evaluate(new ColumnVisibility(cv)))
 +            return Collections.singletonList(Short.valueOf((short) 2));
 +
 +        } catch (BadArgumentException bae) {
 +          return Collections.singletonList(new Short((short) 1));
 +        } catch (VisibilityParseException e) {
 +          return Collections.singletonList(new Short((short) 1));
 +        }
 +
 +        if (ok != null)
 +          ok.add(key);
 +      }
 +    }
 +
 +    return null;
 +  }
 +}
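
A hedged sketch of enabling the constraint above on a table through the public TableOperations API (conn and the table name are hypothetical):

    // Registers VisibilityConstraint for the table; returns the constraint number
    conn.tableOperations().addConstraint("mytable",
        VisibilityConstraint.class.getName());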

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/data/Key.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/Key.java
index f88ddaa,f605c98..758436d
--- a/core/src/main/java/org/apache/accumulo/core/data/Key.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Key.java
@@@ -786,23 -660,6 +786,23 @@@ public class Key implements WritableCom
      return appendPrintableString(ba, offset, len, maxLen, new StringBuilder()).toString();
    }
  
 +  /**
 +   * Appends ASCII printable characters to a string, based on the given byte array, treating the bytes as ASCII characters. If a byte can be converted to a
-    * ASCII printable character it is appended as is; otherwise, it is appended as a character code, e.g., %05; for byte value 5. If len > maxlen, the string
++   * ASCII printable character it is appended as is; otherwise, it is appended as a character code, e.g., %05; for byte value 5. If len &gt; maxlen, the string
 +   * includes a "TRUNCATED" note at the end.
 +   *
 +   * @param ba
 +   *          byte array
 +   * @param offset
 +   *          offset to start with in byte array (inclusive)
 +   * @param len
 +   *          number of bytes to print
 +   * @param maxLen
 +   *          maximum number of bytes to convert to printable form
 +   * @param sb
 +   *          <code>StringBuilder</code> to append to
 +   * @return given <code>StringBuilder</code>
 +   */
    public static StringBuilder appendPrintableString(byte ba[], int offset, int len, int maxLen, StringBuilder sb) {
      int plen = Math.min(len, maxLen);
  

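A short sketch of calling the method documented above (rowBytes is a hypothetical byte array):

    // Render at most 64 bytes; unprintable bytes come out as codes like %05;
    StringBuilder sb = new StringBuilder();
    Key.appendPrintableString(rowBytes, 0, rowBytes.length, 64, sb);
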
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/data/Range.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/data/Range.java
index 0fcfee6,7ccfe3d..c114e2b
--- a/core/src/main/java/org/apache/accumulo/core/data/Range.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/Range.java
@@@ -555,17 -506,14 +555,17 @@@ public class Range implements WritableC
    }
  
    /**
-    * Creates a new range that is bounded by the columns passed in. The start key in the returned range will have a column >= to the minimum column. The end key
-    * in the returned range will have a column <= the max column.
 -   * Creates a new range that is bounded by the columns passed in. The stary key in the returned range will have a column &gt;= to the minimum column. The end
++   * Creates a new range that is bounded by the columns passed in. The start key in the returned range will have a column &gt;= to the minimum column. The end
+    * key in the returned range will have a column &lt;= the max column.
     *
 +   * @param min
 +   *          minimum column
 +   * @param max
 +   *          maximum column
     * @return a column bounded range
     * @throws IllegalArgumentException
 -   *           if min &gt; max
 +   *           if the minimum column compares greater than the maximum column
     */
 -
    public Range bound(Column min, Column max) {
  
      if (min.compareTo(max) > 0) {

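A minimal sketch of the bound method documented above, assuming the byte-array Column constructor (the names are made up):

    Range r = new Range("row1", "row9");
    Column min = new Column("cf1".getBytes(UTF_8), null, null);
    Column max = new Column("cf5".getBytes(UTF_8), null, null);
    Range bounded = r.bound(min, max);  // throws if min compares greater than max
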
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/CachedBlockQueue.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/ClassSize.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Utils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
index 525e2a2,af48770..5a96c20
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
@@@ -16,10 -16,11 +16,10 @@@
   */
  package org.apache.accumulo.core.metadata;
  
 -import org.apache.accumulo.core.client.Instance;
 -import org.apache.accumulo.core.security.Credentials;
 +import org.apache.accumulo.core.client.impl.ClientContext;
  
  /**
-  * A metadata servicer for the metadata table (which holds metadata for user tables).<br />
+  * A metadata servicer for the metadata table (which holds metadata for user tables).<br>
   * The metadata table's metadata is serviced in the root table.
   */
  class ServicerForMetadataTable extends TableMetadataServicer {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
index 73a943d,b279d01..32b5824
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
@@@ -22,11 -22,11 +22,11 @@@ import org.apache.accumulo.core.client.
  import org.apache.accumulo.core.client.AccumuloSecurityException;
  import org.apache.accumulo.core.client.Instance;
  import org.apache.accumulo.core.client.TableNotFoundException;
 -import org.apache.accumulo.core.data.KeyExtent;
 -import org.apache.accumulo.core.security.Credentials;
 +import org.apache.accumulo.core.client.impl.ClientContext;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
  
  /**
-  * A metadata servicer for the root table.<br />
+  * A metadata servicer for the root table.<br>
   * The root table's metadata is serviced in zookeeper.
   */
  class ServicerForRootTable extends MetadataServicer {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
index 5efa8a6,607dfbd..73f9188
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
@@@ -16,10 -16,11 +16,10 @@@
   */
  package org.apache.accumulo.core.metadata;
  
 -import org.apache.accumulo.core.client.Instance;
 -import org.apache.accumulo.core.security.Credentials;
 +import org.apache.accumulo.core.client.impl.ClientContext;
  
  /**
-  * A metadata servicer for user tables.<br />
+  * A metadata servicer for user tables.<br>
   * Metadata for user tables are serviced in the metadata table.
   */
  class ServicerForUserTables extends TableMetadataServicer {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
index 6baae17,f20fce1..3970c49
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@@ -227,55 -233,4 +227,55 @@@ public class MetadataSchema 
  
    }
  
 +  /**
 +   * Holds references to files that need replication
 +   * <p>
-    * <code>~replhdfs://localhost:8020/accumulo/wal/tserver+port/WAL stat:local_table_id [] -> protobuf</code>
++   * <code>~replhdfs://localhost:8020/accumulo/wal/tserver+port/WAL stat:local_table_id [] -&gt; protobuf</code>
 +   */
 +  public static class ReplicationSection {
 +    public static final Text COLF = new Text("stat");
 +    private static final ArrayByteSequence COLF_BYTE_SEQ = new ArrayByteSequence(COLF.toString());
 +    private static final Section section = new Section(RESERVED_PREFIX + "repl", true, RESERVED_PREFIX + "repm", false);
 +
 +    public static Range getRange() {
 +      return section.getRange();
 +    }
 +
 +    public static String getRowPrefix() {
 +      return section.getRowPrefix();
 +    }
 +
 +    /**
 +     * Extract the table ID from the colfam into the given {@link Text}
 +     *
 +     * @param k
 +     *          Key to extract from
 +     * @param buff
 +     *          Text to place table ID into
 +     */
 +    public static void getTableId(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +
 +      k.getColumnQualifier(buff);
 +    }
 +
 +    /**
 +     * Extract the file name from the row suffix into the given {@link Text}
 +     *
 +     * @param k
 +     *          Key to extract from
 +     * @param buff
 +     *          Text to place file name into
 +     */
 +    public static void getFile(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +      Preconditions.checkArgument(COLF_BYTE_SEQ.equals(k.getColumnFamilyData()), "Given metadata replication status key with incorrect colfam");
 +
 +      k.getRow(buff);
 +
 +      buff.set(buff.getBytes(), section.getRowPrefix().length(), buff.getLength() - section.getRowPrefix().length());
 +    }
 +  }
  }
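
A hedged usage sketch of the ReplicationSection added above, assuming a Scanner named s over the metadata table:

    s.setRange(ReplicationSection.getRange());   // only replication entries
    Text tableId = new Text();
    Text file = new Text();
    for (Map.Entry<Key,Value> e : s) {
      ReplicationSection.getTableId(e.getKey(), tableId);
      ReplicationSection.getFile(e.getKey(), file);
    }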

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
index ed46130,0000000..b352957
mode 100644,000000..100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
@@@ -1,299 -1,0 +1,299 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.core.replication;
 +
 +import static java.nio.charset.StandardCharsets.UTF_8;
 +
 +import java.nio.charset.CharacterCodingException;
 +
 +import org.apache.accumulo.core.client.ScannerBase;
 +import org.apache.accumulo.core.client.lexicoder.ULongLexicoder;
 +import org.apache.accumulo.core.data.ArrayByteSequence;
 +import org.apache.accumulo.core.data.ByteSequence;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.io.Text;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.common.base.Preconditions;
 +
 +/**
 + *
 + */
 +public class ReplicationSchema {
 +  private static final Logger log = LoggerFactory.getLogger(ReplicationSchema.class);
 +
 +  /**
 +   * Portion of a file that must be replication to the given target: peer and some identifying location on that peer, e.g. remote table ID
 +   * <p>
-    * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL work:serialized_ReplicationTarget [] -> Status Protobuf</code>
++   * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL work:serialized_ReplicationTarget [] -&gt; Status Protobuf</code>
 +   */
 +  public static class WorkSection {
 +    public static final Text NAME = new Text("work");
 +    private static final ByteSequence BYTE_SEQ_NAME = new ArrayByteSequence("work");
 +
 +    public static void getFile(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +      Preconditions.checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()), "Given replication work key with incorrect colfam");
 +      _getFile(k, buff);
 +    }
 +
 +    public static ReplicationTarget getTarget(Key k) {
 +      return getTarget(k, new Text());
 +    }
 +
 +    public static ReplicationTarget getTarget(Key k, Text buff) {
 +      Preconditions.checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()), "Given replication work key with incorrect colfam");
 +      k.getColumnQualifier(buff);
 +
 +      return ReplicationTarget.from(buff);
 +    }
 +
 +    /**
 +     * Limit the scanner to only pull replication work records
 +     */
 +    public static void limit(ScannerBase scanner) {
 +      scanner.fetchColumnFamily(NAME);
 +    }
 +
 +    public static Mutation add(Mutation m, Text serializedTarget, Value v) {
 +      m.put(NAME, serializedTarget, v);
 +      return m;
 +    }
 +  }
 +
 +  /**
 +   * Holds replication markers tracking status for files
 +   * <p>
-    * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL repl:local_table_id [] -> Status Protobuf</code>
++   * <code>hdfs://localhost:8020/accumulo/wal/tserver+port/WAL repl:local_table_id [] -&gt; Status Protobuf</code>
 +   */
 +  public static class StatusSection {
 +    public static final Text NAME = new Text("repl");
 +    private static final ByteSequence BYTE_SEQ_NAME = new ArrayByteSequence("repl");
 +
 +    /**
 +     * Extract the table ID from the key (inefficiently if called repeatedly)
 +     *
 +     * @param k
 +     *          Key to extract from
 +     * @return The table ID
 +     * @see #getTableId(Key,Text)
 +     */
 +    public static String getTableId(Key k) {
 +      Text buff = new Text();
 +      getTableId(k, buff);
 +      return buff.toString();
 +    }
 +
 +    /**
 +     * Extract the table ID from the key into the given {@link Text}
 +     *
 +     * @param k
 +     *          Key to extract from
 +     * @param buff
 +     *          Text to place table ID into
 +     */
 +    public static void getTableId(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +
 +      k.getColumnQualifier(buff);
 +    }
 +
 +    /**
 +     * Extract the file name from the row suffix into the given {@link Text}
 +     *
 +     * @param k
 +     *          Key to extract from
 +     * @param buff
 +     *          Text to place file name into
 +     */
 +    public static void getFile(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +      Preconditions.checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()), "Given replication status key with incorrect colfam");
 +
 +      _getFile(k, buff);
 +    }
 +
 +    /**
 +     * Limit the scanner to only return Status records
 +     */
 +    public static void limit(ScannerBase scanner) {
 +      scanner.fetchColumnFamily(NAME);
 +    }
 +
 +    public static Mutation add(Mutation m, Text tableId, Value v) {
 +      m.put(NAME, tableId, v);
 +      return m;
 +    }
 +  }
 +
 +  /**
 +   * Holds the order in which files needed for replication were closed. The intent is to be able to guarantee that files which were closed earlier were
 +   * replicated first and we don't replay data in the wrong order on our peers
 +   * <p>
-    * <code>encodedTimeOfClosure\x00hdfs://localhost:8020/accumulo/wal/tserver+port/WAL order:source_table_id [] -> Status Protobuf</code>
++   * <code>encodedTimeOfClosure\x00hdfs://localhost:8020/accumulo/wal/tserver+port/WAL order:source_table_id [] -&gt; Status Protobuf</code>
 +   */
 +  public static class OrderSection {
 +    public static final Text NAME = new Text("order");
 +    public static final Text ROW_SEPARATOR = new Text(new byte[] {0});
 +    private static final ULongLexicoder longEncoder = new ULongLexicoder();
 +
 +    /**
 +     * Extract the table ID from the given key (inefficiently if called repeatedly)
 +     *
 +     * @param k
 +     *          OrderSection Key
 +     * @return source table id
 +     */
 +    public static String getTableId(Key k) {
 +      Text buff = new Text();
 +      getTableId(k, buff);
 +      return buff.toString();
 +    }
 +
 +    /**
 +     * Extract the table ID from the given key
 +     *
 +     * @param k
 +     *          OrderSection key
 +     * @param buff
 +     *          Text to place table ID into
 +     */
 +    public static void getTableId(Key k, Text buff) {
 +      Preconditions.checkNotNull(k);
 +      Preconditions.checkNotNull(buff);
 +
 +      k.getColumnQualifier(buff);
 +    }
 +
 +    /**
 +     * Limit the scanner to only return Order records
 +     */
 +    public static void limit(ScannerBase scanner) {
 +      scanner.fetchColumnFamily(NAME);
 +    }
 +
 +    /**
 +     * Creates the Mutation for the Order section for the given file and time
 +     *
 +     * @param file
 +     *          Filename
 +     * @param timeInMillis
 +     *          Time in millis that the file was closed
 +     * @return Mutation for the Order section
 +     */
 +    public static Mutation createMutation(String file, long timeInMillis) {
 +      Preconditions.checkNotNull(file);
 +      Preconditions.checkArgument(timeInMillis >= 0, "timeInMillis must be greater than zero");
 +
 +      // Encode the time so it sorts properly
 +      byte[] rowPrefix = longEncoder.encode(timeInMillis);
 +      Text row = new Text(rowPrefix);
 +
 +      // Normalize the file using Path
 +      Path p = new Path(file);
 +      String pathString = p.toUri().toString();
 +
 +      log.trace("Normalized {} into {}", file, pathString);
 +
 +      // Append the file as a suffix to the row
 +      row.append((ROW_SEPARATOR + pathString).getBytes(UTF_8), 0, pathString.length() + ROW_SEPARATOR.getLength());
 +
 +      // Make the mutation and add the column update
 +      return new Mutation(row);
 +    }
 +
 +    /**
 +     * Add a column update to the given mutation with the provided tableId and value
 +     *
 +     * @param m
 +     *          Mutation for OrderSection
 +     * @param tableId
 +     *          Source table id
 +     * @param v
 +     *          Serialized Status msg
 +     * @return The original Mutation
 +     */
 +    public static Mutation add(Mutation m, Text tableId, Value v) {
 +      m.put(NAME, tableId, v);
 +      return m;
 +    }
 +
 +    public static long getTimeClosed(Key k) {
 +      return getTimeClosed(k, new Text());
 +    }
 +
 +    public static long getTimeClosed(Key k, Text buff) {
 +      k.getRow(buff);
 +      int offset = 0;
 +      // find the last offset
 +      while (true) {
 +        int nextOffset = buff.find(ROW_SEPARATOR.toString(), offset + 1);
 +        if (-1 == nextOffset) {
 +          break;
 +        }
 +        offset = nextOffset;
 +      }
 +
 +      if (-1 == offset) {
 +        throw new IllegalArgumentException("Row does not contain expected separator for OrderSection");
 +      }
 +
 +      byte[] encodedLong = new byte[offset];
 +      System.arraycopy(buff.getBytes(), 0, encodedLong, 0, offset);
 +      return longEncoder.decode(encodedLong);
 +    }
 +
 +    public static String getFile(Key k) {
 +      Text buff = new Text();
 +      return getFile(k, buff);
 +    }
 +
 +    public static String getFile(Key k, Text buff) {
 +      k.getRow(buff);
 +      int offset = 0;
 +      // find the last offset
 +      while (true) {
 +        int nextOffset = buff.find(ROW_SEPARATOR.toString(), offset + 1);
 +        if (-1 == nextOffset) {
 +          break;
 +        }
 +        offset = nextOffset;
 +      }
 +
 +      if (-1 == offset) {
 +        throw new IllegalArgumentException("Row does not contain expected separator for OrderSection");
 +      }
 +
 +      try {
 +        return Text.decode(buff.getBytes(), offset + 1, buff.getLength() - (offset + 1));
 +      } catch (CharacterCodingException e) {
 +        throw new IllegalArgumentException("Could not decode file path", e);
 +      }
 +    }
 +  }
 +
 +  private static void _getFile(Key k, Text buff) {
 +    k.getRow(buff);
 +  }
 +}
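
A hedged sketch tying the sections above together; bw (a BatchWriter), scanner, and statusBytes (a serialized Status protobuf) are assumptions:

    // Record the close order of a WAL so peers replay it in sequence
    Mutation m = OrderSection.createMutation(
        "hdfs://localhost:8020/accumulo/wal/tserver+9997/wal-0001",
        System.currentTimeMillis());
    OrderSection.add(m, new Text("2"), new Value(statusBytes));
    bw.addMutation(m);

    // Restrict a scanner to Status records only
    StatusSection.limit(scanner);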

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/java/org/apache/accumulo/core/security/ColumnVisibility.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/core/src/main/resources/org/apache/accumulo/core/conf/config-header.html
----------------------------------------------------------------------
diff --cc core/src/main/resources/org/apache/accumulo/core/conf/config-header.html
index 9c770b1,8270ad2..49291fc
--- a/core/src/main/resources/org/apache/accumulo/core/conf/config-header.html
+++ b/core/src/main/resources/org/apache/accumulo/core/conf/config-header.html
@@@ -28,23 -28,23 +28,23 @@@
    below (from highest to lowest):</p>
    <table>
     <tr><th>Location</th><th>Description</th></tr>
--   <tr class='highlight'><td><b>Zookeeper<br/>table properties</b></td>
--       <td>Table properties are applied to the entire cluster when set in zookeeper using the accumulo API or shell.  While table properties take precedent over system properties, both will override properties set in accumulo-site.xml<br/><br/>
++   <tr class='highlight'><td><b>Zookeeper<br />table properties</b></td>
++       <td>Table properties are applied to the entire cluster when set in zookeeper using the accumulo API or shell.  While table properties take precedent over system properties, both will override properties set in accumulo-site.xml<br /><br />
             Table properties consist of all properties with the table.* prefix.  Table properties are configured on a per-table basis using the following shell commmand:
          <pre>config -t TABLE -s PROPERTY=VALUE</pre></td>
     </tr>
--   <tr><td><b>Zookeeper<br/>system properties</b></td>
++   <tr><td><b>Zookeeper<br />system properties</b></td>
        <td>System properties are applied to the entire cluster when set in zookeeper using the accumulo API or shell.  System properties consist of all properties with a 'yes' in the 'Zookeeper Mutable' column in the table below.  They are set with the following shell command:
          <pre>config -s PROPERTY=VALUE</pre>
--      If a table.* property is set using this method, the value will apply to all tables except those configured on per-table basis (which have higher precedence).<br/><br/>
++      If a table.* property is set using this method, the value will apply to all tables except those configured on per-table basis (which have higher precedence).<br /><br />
        While most system properties take effect immediately, some require a restart of the process which is indicated in 'Zookeeper Mutable'.</td>
     </tr>
     <tr class='highlight'><td><b>accumulo-site.xml</b></td>
--       <td>Accumulo processes (master, tserver, etc) read their local accumulo-site.xml on start up.  Therefore, changes made to accumulo-site.xml must rsynced across the cluster and processes must be restarted to apply changes.<br/><br/>
++       <td>Accumulo processes (master, tserver, etc) read their local accumulo-site.xml on start up.  Therefore, changes made to accumulo-site.xml must rsynced across the cluster and processes must be restarted to apply changes.<br /><br />
             Certain properties (indicated by a 'no' in 'Zookeeper Mutable') cannot be set in zookeeper and only set in this file.  The accumulo-site.xml also allows you to configure tablet servers with different settings.</td>
     </tr>
     <tr><td><b>Default</b></td>
--        <td>All properties have a default value in the source code.  This value has the lowest precedence and is overriden if set in accumulo-site.xml or zookeeper.<br/><br/>While the default value is usually optimal, there are cases where a change can increase query and ingest performance.</td>
++        <td>All properties have a default value in the source code.  This value has the lowest precedence and is overriden if set in accumulo-site.xml or zookeeper.<br /><br />While the default value is usually optimal, there are cases where a change can increase query and ingest performance.</td>
     </tr>
    </table>
  

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
index fb4e0d9,0000000..9734528
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/GroupBalancer.java
@@@ -1,788 -1,0 +1,788 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.accumulo.server.master.balancer;
 +
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Objects;
 +import java.util.Set;
 +import java.util.SortedMap;
 +
 +import org.apache.accumulo.core.client.IsolatedScanner;
 +import org.apache.accumulo.core.client.RowIterator;
 +import org.apache.accumulo.core.client.Scanner;
 +import org.apache.accumulo.core.data.Key;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
 +import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 +import org.apache.accumulo.core.metadata.MetadataTable;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 +import org.apache.accumulo.core.security.Authorizations;
 +import org.apache.accumulo.core.util.ComparablePair;
 +import org.apache.accumulo.core.util.MapCounter;
 +import org.apache.accumulo.core.util.Pair;
 +import org.apache.accumulo.server.master.state.TServerInstance;
 +import org.apache.accumulo.server.master.state.TabletMigration;
 +import org.apache.commons.lang.mutable.MutableInt;
 +import org.apache.hadoop.io.Text;
 +
 +import com.google.common.base.Function;
 +import com.google.common.base.Preconditions;
 +import com.google.common.collect.HashBasedTable;
 +import com.google.common.collect.HashMultimap;
 +import com.google.common.collect.Iterators;
 +import com.google.common.collect.Multimap;
 +import com.google.common.collect.Table;
 +
 +/**
 + * A balancer that evenly spreads groups of tablets across all tablet server. This balancer accomplishes the following two goals :
 + *
 + * <ul>
-  * <li/>Evenly spreads each group across all tservers.
-  * <li/>Minimizes the total number of groups on each tserver.
++ * <li>Evenly spreads each group across all tservers.
++ * <li>Minimizes the total number of groups on each tserver.
 + * </ul>
 + *
 + * <p>
 + * To use this balancer you must extend it and implement {@link #getPartitioner()}. See {@link RegexGroupBalancer} as an example.
 + */
 +
 +public abstract class GroupBalancer extends TabletBalancer {
 +
 +  private final String tableId;
 +  private final Text textTableId;
 +  private long lastRun = 0;
 +
 +  /**
 +   * @return A function that groups tablets into named groups.
 +   */
 +  protected abstract Function<KeyExtent,String> getPartitioner();
 +
 +  public GroupBalancer(String tableId) {
 +    this.tableId = tableId;
 +    this.textTableId = new Text(tableId);
 +  }
 +
 +  protected Iterable<Pair<KeyExtent,Location>> getLocationProvider() {
 +    return new MetadataLocationProvider();
 +  }
 +
 +  /**
 +   * The amount of time to wait between balancing.
 +   */
 +  protected long getWaitTime() {
 +    return 60000;
 +  }
 +
 +  /**
 +   * The maximum number of migrations to perform in a single pass.
 +   */
 +  protected int getMaxMigrations() {
 +    return 1000;
 +  }
 +
 +  /**
 +   * @return Examine current tserver and migrations and return true if balancing should occur.
 +   */
 +  protected boolean shouldBalance(SortedMap<TServerInstance,TabletServerStatus> current, Set<KeyExtent> migrations) {
 +
 +    if (current.size() < 2) {
 +      return false;
 +    }
 +
 +    for (KeyExtent keyExtent : migrations) {
 +      if (keyExtent.getTableId().equals(textTableId)) {
 +        return false;
 +      }
 +    }
 +
 +    return true;
 +  }
 +
 +  @Override
 +  public void getAssignments(SortedMap<TServerInstance,TabletServerStatus> current, Map<KeyExtent,TServerInstance> unassigned,
 +      Map<KeyExtent,TServerInstance> assignments) {
 +
 +    if (current.size() == 0) {
 +      return;
 +    }
 +
 +    Function<KeyExtent,String> partitioner = getPartitioner();
 +
 +    List<ComparablePair<String,KeyExtent>> tabletsByGroup = new ArrayList<>();
 +    for (Entry<KeyExtent,TServerInstance> entry : unassigned.entrySet()) {
 +      TServerInstance last = entry.getValue();
 +      if (last != null) {
 +        // Maintain locality
 +        String fakeSessionID = " ";
 +        TServerInstance simple = new TServerInstance(last.getLocation(), fakeSessionID);
 +        Iterator<TServerInstance> find = current.tailMap(simple).keySet().iterator();
 +        if (find.hasNext()) {
 +          TServerInstance tserver = find.next();
 +          if (tserver.host().equals(last.host())) {
 +            assignments.put(entry.getKey(), tserver);
 +            continue;
 +          }
 +        }
 +      }
 +
 +      tabletsByGroup.add(new ComparablePair<String,KeyExtent>(partitioner.apply(entry.getKey()), entry.getKey()));
 +    }
 +
 +    Collections.sort(tabletsByGroup);
 +
 +    Iterator<TServerInstance> tserverIter = Iterators.cycle(current.keySet());
 +    for (ComparablePair<String,KeyExtent> pair : tabletsByGroup) {
 +      KeyExtent ke = pair.getSecond();
 +      assignments.put(ke, tserverIter.next());
 +    }
 +
 +  }
 +
 +  @Override
 +  public long balance(SortedMap<TServerInstance,TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
 +
 +    // The terminology extra and expected are used in this code. Expected tablets is the number of tablets a tserver must have for a given group and is
 +    // numInGroup/numTservers. Extra tablets are any tablets more than the number expected for a given group. If numInGroup % numTservers > 0, then a tserver
 +    // may have one extra tablet for a group.
 +    //
 +    // Assume we have 4 tservers and group A has 11 tablets.
 +    // * expected tablets : group A is expected to have 2 tablets on each tservers
 +    // * extra tablets : group A may have an additional tablet on each tserver. Group A has a total of 3 extra tablets.
 +    //
 +    // This balancer also evens out the extra tablets across all groups. The terminology extraExpected and extraExtra is used to describe these tablets.
 +    // ExtraExpected is totalExtra/numTservers. ExtraExtra is totalExtra%numTservers. Each tserver should have at least expectedExtra extra tablets and at most
 +    // one extraExtra tablets. All extra tablets on a tserver must be from different groups.
 +    //
 +    // Assume we have 6 tservers and three groups (G1, G2, G3) with 9 tablets each. Each tserver is expected to have one tablet from each group and could
 +    // possibly have 2 tablets from a group. Below is an illustration of an ideal balancing of extra tablets. To understand the illustration, the first column
 +    // shows tserver T1 with 2 tablets from G1, 1 tablet from G2, and two tablets from G3. EE means empty, put it there so eclipse formating would not mess up
 +    // table.
 +    //
 +    // T1 | T2 | T3 | T4 | T5 | T6
 +    // ---+----+----+----+----+-----
 +    // G3 | G2 | G3 | EE | EE | EE <-- extra extra tablets
 +    // G1 | G1 | G1 | G2 | G3 | G2 <-- extra expected tablets.
 +    // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
 +    // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
 +    // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3
 +    //
 +    // Do not want to balance the extra tablets like the following. There are two problem with this. First extra tablets are not evenly spread. Since there are
 +    // a total of 9 extra tablets, every tserver is expected to have at least one extra tablet. Second tserver T1 has two extra tablet for group G1. This
 +    // violates the principal that a tserver can only have one extra tablet for a given group.
 +    //
 +    // T1 | T2 | T3 | T4 | T5 | T6
 +    // ---+----+----+----+----+-----
 +    // G1 | EE | EE | EE | EE | EE <--- one extra tablets from group 1
 +    // G3 | G3 | G3 | EE | EE | EE <--- three extra tablets from group 3
 +    // G2 | G2 | G2 | EE | EE | EE <--- three extra tablets from group 2
 +    // G1 | G1 | EE | EE | EE | EE <--- two extra tablets from group 1
 +    // G1 | G1 | G1 | G1 | G1 | G1 <-- expected tablets for group 1
 +    // G2 | G2 | G2 | G2 | G2 | G2 <-- expected tablets for group 2
 +    // G3 | G3 | G3 | G3 | G3 | G3 <-- expected tablets for group 3
 +
 +    if (!shouldBalance(current, migrations)) {
 +      return 5000;
 +    }
 +
 +    if (System.currentTimeMillis() - lastRun < getWaitTime()) {
 +      return 5000;
 +    }
 +
 +    MapCounter<String> groupCounts = new MapCounter<>();
 +    Map<TServerInstance,TserverGroupInfo> tservers = new HashMap<>();
 +
 +    for (TServerInstance tsi : current.keySet()) {
 +      tservers.put(tsi, new TserverGroupInfo(tsi));
 +    }
 +
 +    Function<KeyExtent,String> partitioner = getPartitioner();
 +
 +    // collect stats about current state
 +    for (Pair<KeyExtent,Location> entry : getLocationProvider()) {
 +      String group = partitioner.apply(entry.getFirst());
 +      Location loc = entry.getSecond();
 +
 +      if (loc.equals(Location.NONE) || !tservers.containsKey(loc.getTserverInstance())) {
 +        return 5000;
 +      }
 +
 +      groupCounts.increment(group, 1);
 +      TserverGroupInfo tgi = tservers.get(loc.getTserverInstance());
 +      tgi.addGroup(group);
 +    }
 +
 +    Map<String,Integer> expectedCounts = new HashMap<>();
 +
 +    int totalExtra = 0;
 +    for (String group : groupCounts.keySet()) {
 +      long groupCount = groupCounts.get(group);
 +      totalExtra += groupCount % current.size();
 +      expectedCounts.put(group, (int) (groupCount / current.size()));
 +    }
 +
 +    // The number of extra tablets from all groups that each tserver must have.
 +    int expectedExtra = totalExtra / current.size();
 +    int maxExtraGroups = expectedExtra + 1;
 +
 +    expectedCounts = Collections.unmodifiableMap(expectedCounts);
 +    tservers = Collections.unmodifiableMap(tservers);
 +
 +    for (TserverGroupInfo tgi : tservers.values()) {
 +      tgi.finishedAdding(expectedCounts);
 +    }
 +
 +    Moves moves = new Moves();
 +
 +    // The order of the following steps is important, because as ordered each step should not move any tablets moved by a previous step.
 +    balanceExpected(tservers, moves);
 +    if (moves.size() < getMaxMigrations()) {
 +      balanceExtraExpected(tservers, expectedExtra, moves);
 +      if (moves.size() < getMaxMigrations()) {
 +        boolean cont = balanceExtraMultiple(tservers, maxExtraGroups, moves);
 +        if (cont && moves.size() < getMaxMigrations()) {
 +          balanceExtraExtra(tservers, maxExtraGroups, moves);
 +        }
 +      }
 +    }
 +
 +    populateMigrations(tservers.keySet(), migrationsOut, moves);
 +
 +    lastRun = System.currentTimeMillis();
 +
 +    return 5000;
 +  }
 +
 +  public static class Location {
 +    public static final Location NONE = new Location();
 +    private final TServerInstance tserverInstance;
 +
 +    public Location() {
 +      this(null);
 +    }
 +
 +    public Location(TServerInstance tsi) {
 +      tserverInstance = tsi;
 +    }
 +
 +    public TServerInstance getTserverInstance() {
 +      return tserverInstance;
 +    }
 +
 +    @Override
 +    public int hashCode() {
 +      return Objects.hashCode(tserverInstance);
 +    }
 +
 +    @Override
 +    public boolean equals(Object o) {
 +      if (o instanceof Location) {
 +        Location ol = ((Location) o);
 +        if (tserverInstance == ol.tserverInstance) {
 +          return true;
 +        }
 +        return tserverInstance.equals(ol.tserverInstance);
 +      }
 +      return false;
 +    }
 +  }
 +
 +  static class TserverGroupInfo {
 +
 +    private Map<String,Integer> expectedCounts;
 +    private final Map<String,MutableInt> initialCounts = new HashMap<>();
 +    private final Map<String,Integer> extraCounts = new HashMap<>();
 +    private final Map<String,Integer> expectedDeficits = new HashMap<>();
 +
 +    private final TServerInstance tsi;
 +    private boolean finishedAdding = false;
 +
 +    TserverGroupInfo(TServerInstance tsi) {
 +      this.tsi = tsi;
 +    }
 +
 +    public void addGroup(String group) {
 +      Preconditions.checkState(!finishedAdding);
 +
 +      MutableInt mi = initialCounts.get(group);
 +      if (mi == null) {
 +        mi = new MutableInt();
 +        initialCounts.put(group, mi);
 +      }
 +
 +      mi.increment();
 +    }
 +
 +    public void finishedAdding(Map<String,Integer> expectedCounts) {
 +      Preconditions.checkState(!finishedAdding);
 +      finishedAdding = true;
 +      this.expectedCounts = expectedCounts;
 +
 +      for (Entry<String,Integer> entry : expectedCounts.entrySet()) {
 +        String group = entry.getKey();
 +        int expected = entry.getValue();
 +
 +        MutableInt count = initialCounts.get(group);
 +        int num = count == null ? 0 : count.intValue();
 +
 +        if (num < expected) {
 +          expectedDeficits.put(group, expected - num);
 +        } else if (num > expected) {
 +          extraCounts.put(group, num - expected);
 +        }
 +      }
 +
 +    }
 +
 +    public void moveOff(String group, int num) {
 +      Preconditions.checkArgument(num > 0);
 +      Preconditions.checkState(finishedAdding);
 +
 +      Integer extraCount = extraCounts.get(group);
 +
 +      Preconditions.checkArgument(extraCount != null && extraCount >= num, "group=%s num=%s extraCount=%s", group, num, extraCount);
 +
 +      MutableInt initialCount = initialCounts.get(group);
 +
 +      Preconditions.checkArgument(initialCount.intValue() >= num);
 +
 +      initialCount.subtract(num);
 +
 +      if (extraCount - num == 0) {
 +        extraCounts.remove(group);
 +      } else {
 +        extraCounts.put(group, extraCount - num);
 +      }
 +    }
 +
 +    public void moveTo(String group, int num) {
 +      Preconditions.checkArgument(num > 0);
 +      Preconditions.checkArgument(expectedCounts.containsKey(group));
 +      Preconditions.checkState(finishedAdding);
 +
 +      Integer deficit = expectedDeficits.get(group);
 +      if (deficit != null) {
 +        if (num >= deficit) {
 +          expectedDeficits.remove(group);
 +          num -= deficit;
 +        } else {
 +          expectedDeficits.put(group, deficit - num);
 +          num = 0;
 +        }
 +      }
 +
 +      if (num > 0) {
 +        Integer extra = extraCounts.get(group);
 +        if (extra == null) {
 +          extra = 0;
 +        }
 +
 +        extraCounts.put(group, extra + num);
 +      }
 +
 +      // TODO could check extra constraints
 +    }
 +
 +    public Map<String,Integer> getExpectedDeficits() {
 +      Preconditions.checkState(finishedAdding);
 +      return Collections.unmodifiableMap(expectedDeficits);
 +    }
 +
 +    public Map<String,Integer> getExtras() {
 +      Preconditions.checkState(finishedAdding);
 +      return Collections.unmodifiableMap(extraCounts);
 +    }
 +
 +    public TServerInstance getTserverInstance() {
 +      return tsi;
 +    }
 +
 +    @Override
 +    public int hashCode() {
 +      return tsi.hashCode();
 +    }
 +
 +    @Override
 +    public boolean equals(Object o) {
 +      if (o instanceof TserverGroupInfo) {
 +        TserverGroupInfo otgi = (TserverGroupInfo) o;
 +        return tsi.equals(otgi.tsi);
 +      }
 +
 +      return false;
 +    }
 +
 +    @Override
 +    public String toString() {
 +      return tsi.toString();
 +    }
 +
 +  }
 +
 +  private static class Move {
 +    TserverGroupInfo dest;
 +    int count;
 +
 +    public Move(TserverGroupInfo dest, int num) {
 +      this.dest = dest;
 +      this.count = num;
 +    }
 +  }
 +
 +  private static class Moves {
 +
 +    private final Table<TServerInstance,String,List<Move>> moves = HashBasedTable.create();
 +    private int totalMoves = 0;
 +
 +    public void move(String group, int num, TserverGroupInfo src, TserverGroupInfo dest) {
 +      Preconditions.checkArgument(num > 0);
 +      Preconditions.checkArgument(!src.equals(dest));
 +
 +      src.moveOff(group, num);
 +      dest.moveTo(group, num);
 +
 +      List<Move> srcMoves = moves.get(src.getTserverInstance(), group);
 +      if (srcMoves == null) {
 +        srcMoves = new ArrayList<>();
 +        moves.put(src.getTserverInstance(), group, srcMoves);
 +      }
 +
 +      srcMoves.add(new Move(dest, num));
 +      totalMoves += num;
 +    }
 +
 +    public TServerInstance removeMove(TServerInstance src, String group) {
 +      List<Move> srcMoves = moves.get(src, group);
 +      if (srcMoves == null) {
 +        return null;
 +      }
 +
 +      Move move = srcMoves.get(srcMoves.size() - 1);
 +      TServerInstance ret = move.dest.getTserverInstance();
 +      totalMoves--;
 +
 +      move.count--;
 +      if (move.count == 0) {
 +        srcMoves.remove(srcMoves.size() - 1);
 +        if (srcMoves.size() == 0) {
 +          moves.remove(src, group);
 +        }
 +      }
 +
 +      return ret;
 +    }
 +
 +    public int size() {
 +      return totalMoves;
 +    }
 +  }
 +
 +  private void balanceExtraExtra(Map<TServerInstance,TserverGroupInfo> tservers, int maxExtraGroups, Moves moves) {
 +    Table<String,TServerInstance,TserverGroupInfo> surplusExtra = HashBasedTable.create();
 +    for (TserverGroupInfo tgi : tservers.values()) {
 +      Map<String,Integer> extras = tgi.getExtras();
 +      if (extras.size() > maxExtraGroups) {
 +        for (String group : extras.keySet()) {
 +          surplusExtra.put(group, tgi.getTserverInstance(), tgi);
 +        }
 +      }
 +    }
 +
 +    ArrayList<Pair<String,TServerInstance>> serversGroupsToRemove = new ArrayList<>();
 +    ArrayList<TServerInstance> serversToRemove = new ArrayList<>();
 +
 +    for (TserverGroupInfo destTgi : tservers.values()) {
 +      if (surplusExtra.size() == 0) {
 +        break;
 +      }
 +
 +      Map<String,Integer> extras = destTgi.getExtras();
 +      if (extras.size() < maxExtraGroups) {
 +        serversToRemove.clear();
 +        serversGroupsToRemove.clear();
 +        for (String group : surplusExtra.rowKeySet()) {
 +          if (!extras.containsKey(group)) {
 +            TserverGroupInfo srcTgi = surplusExtra.row(group).values().iterator().next();
 +
 +            moves.move(group, 1, srcTgi, destTgi);
 +
 +            if (srcTgi.getExtras().size() <= maxExtraGroups) {
 +              serversToRemove.add(srcTgi.getTserverInstance());
 +            } else {
 +              serversGroupsToRemove.add(new Pair<String,TServerInstance>(group, srcTgi.getTserverInstance()));
 +            }
 +
 +            if (destTgi.getExtras().size() >= maxExtraGroups || moves.size() >= getMaxMigrations()) {
 +              break;
 +            }
 +          }
 +        }
 +
 +        if (serversToRemove.size() > 0) {
 +          surplusExtra.columnKeySet().removeAll(serversToRemove);
 +        }
 +
 +        for (Pair<String,TServerInstance> pair : serversGroupsToRemove) {
 +          surplusExtra.remove(pair.getFirst(), pair.getSecond());
 +        }
 +
 +        if (moves.size() >= getMaxMigrations()) {
 +          break;
 +        }
 +      }
 +    }
 +  }
 +
 +  private boolean balanceExtraMultiple(Map<TServerInstance,TserverGroupInfo> tservers, int maxExtraGroups, Moves moves) {
 +    Multimap<String,TserverGroupInfo> extraMultiple = HashMultimap.create();
 +
 +    for (TserverGroupInfo tgi : tservers.values()) {
 +      Map<String,Integer> extras = tgi.getExtras();
 +      for (Entry<String,Integer> entry : extras.entrySet()) {
 +        if (entry.getValue() > 1) {
 +          extraMultiple.put(entry.getKey(), tgi);
 +        }
 +      }
 +    }
 +
 +    balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, false);
 +    if (moves.size() < getMaxMigrations() && extraMultiple.size() > 0) {
 +      // no place to move so must exceed maxExtra temporarily... subsequent balancer calls will smooth things out
 +      balanceExtraMultiple(tservers, maxExtraGroups, moves, extraMultiple, true);
 +      return false;
 +    } else {
 +      return true;
 +    }
 +  }
 +
 +  private void balanceExtraMultiple(Map<TServerInstance,TserverGroupInfo> tservers, int maxExtraGroups, Moves moves,
 +      Multimap<String,TserverGroupInfo> extraMultiple, boolean alwaysAdd) {
 +
 +    ArrayList<Pair<String,TserverGroupInfo>> serversToRemove = new ArrayList<>();
 +    for (TserverGroupInfo destTgi : tservers.values()) {
 +      Map<String,Integer> extras = destTgi.getExtras();
 +      if (alwaysAdd || extras.size() < maxExtraGroups) {
 +        serversToRemove.clear();
 +        for (String group : extraMultiple.keySet()) {
 +          if (!extras.containsKey(group)) {
 +            Collection<TserverGroupInfo> sources = extraMultiple.get(group);
 +            Iterator<TserverGroupInfo> iter = sources.iterator();
 +            TserverGroupInfo srcTgi = iter.next();
 +
 +            int num = srcTgi.getExtras().get(group);
 +
 +            moves.move(group, 1, srcTgi, destTgi);
 +
 +            if (num == 2) {
 +              serversToRemove.add(new Pair<String,TserverGroupInfo>(group, srcTgi));
 +            }
 +
 +            if (destTgi.getExtras().size() >= maxExtraGroups || moves.size() >= getMaxMigrations()) {
 +              break;
 +            }
 +          }
 +        }
 +
 +        for (Pair<String,TserverGroupInfo> pair : serversToRemove) {
 +          extraMultiple.remove(pair.getFirst(), pair.getSecond());
 +        }
 +
 +        if (extraMultiple.size() == 0 || moves.size() >= getMaxMigrations()) {
 +          break;
 +        }
 +      }
 +    }
 +  }
 +
 +  private void balanceExtraExpected(Map<TServerInstance,TserverGroupInfo> tservers, int expectedExtra, Moves moves) {
 +
 +    Table<String,TServerInstance,TserverGroupInfo> extraSurplus = HashBasedTable.create();
 +
 +    for (TserverGroupInfo tgi : tservers.values()) {
 +      Map<String,Integer> extras = tgi.getExtras();
 +      if (extras.size() > expectedExtra) {
 +        for (String group : extras.keySet()) {
 +          extraSurplus.put(group, tgi.getTserverInstance(), tgi);
 +        }
 +      }
 +    }
 +
 +    ArrayList<TServerInstance> emptyServers = new ArrayList<>();
 +    ArrayList<Pair<String,TServerInstance>> emptyServerGroups = new ArrayList<>();
 +    for (TserverGroupInfo destTgi : tservers.values()) {
 +      if (extraSurplus.size() == 0) {
 +        break;
 +      }
 +
 +      Map<String,Integer> extras = destTgi.getExtras();
 +      if (extras.size() < expectedExtra) {
 +        emptyServers.clear();
 +        emptyServerGroups.clear();
 +        nextGroup: for (String group : extraSurplus.rowKeySet()) {
 +          if (!extras.containsKey(group)) {
 +            Iterator<TserverGroupInfo> iter = extraSurplus.row(group).values().iterator();
 +            TserverGroupInfo srcTgi = iter.next();
 +
 +            while (srcTgi.getExtras().size() <= expectedExtra) {
 +              if (iter.hasNext()) {
 +                srcTgi = iter.next();
 +              } else {
 +                continue nextGroup;
 +              }
 +            }
 +
 +            moves.move(group, 1, srcTgi, destTgi);
 +
 +            if (srcTgi.getExtras().size() <= expectedExtra) {
 +              emptyServers.add(srcTgi.getTserverInstance());
 +            } else if (srcTgi.getExtras().get(group) == null) {
 +              emptyServerGroups.add(new Pair<String,TServerInstance>(group, srcTgi.getTserverInstance()));
 +            }
 +
 +            if (destTgi.getExtras().size() >= expectedExtra || moves.size() >= getMaxMigrations()) {
 +              break;
 +            }
 +          }
 +        }
 +
 +        if (emptyServers.size() > 0) {
 +          extraSurplus.columnKeySet().removeAll(emptyServers);
 +        }
 +
 +        for (Pair<String,TServerInstance> pair : emptyServerGroups) {
 +          extraSurplus.remove(pair.getFirst(), pair.getSecond());
 +        }
 +
 +        if (moves.size() >= getMaxMigrations()) {
 +          break;
 +        }
 +      }
 +    }
 +  }
 +
 +  private void balanceExpected(Map<TServerInstance,TserverGroupInfo> tservers, Moves moves) {
 +    Multimap<String,TserverGroupInfo> groupDeficits = HashMultimap.create();
 +    Multimap<String,TserverGroupInfo> groupSurplus = HashMultimap.create();
 +
 +    for (TserverGroupInfo tgi : tservers.values()) {
 +      for (String group : tgi.getExpectedDeficits().keySet()) {
 +        groupDeficits.put(group, tgi);
 +      }
 +
 +      for (String group : tgi.getExtras().keySet()) {
 +        groupSurplus.put(group, tgi);
 +      }
 +    }
 +
 +    for (String group : groupDeficits.keySet()) {
 +      Collection<TserverGroupInfo> deficitServers = groupDeficits.get(group);
 +      for (TserverGroupInfo deficitTsi : deficitServers) {
 +        int numToMove = deficitTsi.getExpectedDeficits().get(group);
 +
 +        Iterator<TserverGroupInfo> surplusIter = groupSurplus.get(group).iterator();
 +        while (numToMove > 0) {
 +          TserverGroupInfo surplusTsi = surplusIter.next();
 +
 +          int available = surplusTsi.getExtras().get(group);
 +
 +          if (numToMove >= available) {
 +            surplusIter.remove();
 +          }
 +
 +          int transfer = Math.min(numToMove, available);
 +
 +          numToMove -= transfer;
 +
 +          moves.move(group, transfer, surplusTsi, deficitTsi);
 +          if (moves.size() >= getMaxMigrations()) {
 +            return;
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private void populateMigrations(Set<TServerInstance> current, List<TabletMigration> migrationsOut, Moves moves) {
 +    if (moves.size() == 0) {
 +      return;
 +    }
 +
 +    Function<KeyExtent,String> partitioner = getPartitioner();
 +
 +    for (Pair<KeyExtent,Location> entry : getLocationProvider()) {
 +      String group = partitioner.apply(entry.getFirst());
 +      Location loc = entry.getSecond();
 +
 +      if (loc.equals(Location.NONE) || !current.contains(loc.getTserverInstance())) {
 +        migrationsOut.clear();
 +        return;
 +      }
 +
 +      TServerInstance dest = moves.removeMove(loc.getTserverInstance(), group);
 +      if (dest != null) {
 +        migrationsOut.add(new TabletMigration(entry.getFirst(), loc.getTserverInstance(), dest));
 +        if (moves.size() == 0) {
 +          break;
 +        }
 +      }
 +    }
 +  }
 +
 +  static class LocationFunction implements Function<Iterator<Entry<Key,Value>>,Pair<KeyExtent,Location>> {
 +    @Override
 +    public Pair<KeyExtent,Location> apply(Iterator<Entry<Key,Value>> input) {
 +      Location loc = Location.NONE;
 +      KeyExtent extent = null;
 +      while (input.hasNext()) {
 +        Entry<Key,Value> entry = input.next();
 +        if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)) {
 +          loc = new Location(new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier()));
 +        } else if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
 +          extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
 +        }
 +      }
 +
 +      return new Pair<KeyExtent,Location>(extent, loc);
 +    }
 +
 +  }
 +
 +  class MetadataLocationProvider implements Iterable<Pair<KeyExtent,Location>> {
 +
 +    @Override
 +    public Iterator<Pair<KeyExtent,Location>> iterator() {
 +      try {
 +        Scanner scanner = new IsolatedScanner(context.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY));
 +        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
 +        MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
 +        scanner.setRange(MetadataSchema.TabletsSection.getRange(tableId));
 +
 +        RowIterator rowIter = new RowIterator(scanner);
 +
 +        return Iterators.transform(rowIter, new LocationFunction());
 +      } catch (Exception e) {
 +        throw new RuntimeException(e);
 +      }
 +    }
 +  }
 +}
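
A note on usage: the migrations computed above are keyed entirely by the group string returned from getPartitioner(), so GroupBalancer is used by subclassing it and supplying a partitioner. Below is a minimal sketch under stated assumptions: the class name FirstCharGroupBalancer and the "default" fallback group are illustrative, and it relies only on the API visible in this diff (the String tableId constructor and the protected getPartitioner()). The RegexGroupBalancer that follows is the configurable version actually added by this commit.

    import org.apache.accumulo.core.data.impl.KeyExtent;
    import org.apache.hadoop.io.Text;

    import com.google.common.base.Function;

    // Hypothetical subclass: groups tablets by the first character of the end row.
    public class FirstCharGroupBalancer extends GroupBalancer {

      public FirstCharGroupBalancer(String tableId) {
        super(tableId);
      }

      @Override
      protected Function<KeyExtent,String> getPartitioner() {
        return new Function<KeyExtent,String>() {
          @Override
          public String apply(KeyExtent input) {
            Text endRow = input.getEndRow();
            // The last tablet in a table has a null end row; give it a fixed fallback group.
            if (endRow == null || endRow.getLength() == 0) {
              return "default";
            }
            return endRow.toString().substring(0, 1);
          }
        };
      }
    }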

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
index 724a606,0000000..0d07a77
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/RegexGroupBalancer.java
@@@ -1,96 -1,0 +1,96 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.accumulo.server.master.balancer;
 +
 +import java.util.Map;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
 +import org.apache.hadoop.io.Text;
 +
 +import com.google.common.base.Function;
 +
 +/**
 + * A {@link GroupBalancer} that groups tablets using a configurable regex. To use this balancer, configure the following settings for your table and then set
 + * this class as the table's balancer.
 + *
 + * <ul>
-  * <li/>Set {@code table.custom.balancer.group.regex.pattern} to a regular expression. This regular expression must have one group. The regex is applied to the
++ * <li>Set {@code table.custom.balancer.group.regex.pattern} to a regular expression. This regular expression must have one group. The regex is applied to the
 + * tablet end row and whatever the regex group matches is used as the group. For example, with a regex of {@code (\d\d).*} and an end row of {@code 12abc}, the
 + * group for the tablet would be {@code 12}.
-  * <li/>Set {@code table.custom.balancer.group.regex.default} to a default group. This group is returned for the last tablet in the table and tablets for which
++ * <li>Set {@code table.custom.balancer.group.regex.default} to a default group. This group is returned for the last tablet in the table and tablets for which
 + * the regex does not match.
-  * <li/>Optionally set {@code table.custom.balancer.group.regex.wait.time} to time (can use time suffixes). This determines how long to wait between balancing.
++ * <li>Optionally set {@code table.custom.balancer.group.regex.wait.time} to a wait time (time suffixes may be used). This determines how long to wait between balancing passes.
 + * Since this balancer scans the metadata table, you may want to set this higher for large tables.
 + * </ul>
 + */
 +
 +public class RegexGroupBalancer extends GroupBalancer {
 +
 +  public static final String REGEX_PROPERTY = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.pattern";
 +  public static final String DEFAULT_GROUP_PROPERTY = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.default";
 +  public static final String WAIT_TIME_PROPERTY = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "balancer.group.regex.wait.time";
 +
 +  private final String tableId;
 +
 +  public RegexGroupBalancer(String tableId) {
 +    super(tableId);
 +    this.tableId = tableId;
 +  }
 +
 +  @Override
 +  protected long getWaitTime() {
 +    Map<String,String> customProps = configuration.getTableConfiguration(tableId).getAllPropertiesWithPrefix(Property.TABLE_ARBITRARY_PROP_PREFIX);
 +    if (customProps.containsKey(WAIT_TIME_PROPERTY)) {
 +      return AccumuloConfiguration.getTimeInMillis(customProps.get(WAIT_TIME_PROPERTY));
 +    }
 +
 +    return super.getWaitTime();
 +  }
 +
 +  @Override
 +  protected Function<KeyExtent,String> getPartitioner() {
 +
 +    Map<String,String> customProps = configuration.getTableConfiguration(tableId).getAllPropertiesWithPrefix(Property.TABLE_ARBITRARY_PROP_PREFIX);
 +    String regex = customProps.get(REGEX_PROPERTY);
 +    final String defaultGroup = customProps.get(DEFAULT_GROUP_PROPERTY);
 +
 +    final Pattern pattern = Pattern.compile(regex);
 +
 +    return new Function<KeyExtent,String>() {
 +
 +      @Override
 +      public String apply(KeyExtent input) {
 +        Text er = input.getEndRow();
 +        if (er == null) {
 +          return defaultGroup;
 +        }
 +
 +        Matcher matcher = pattern.matcher(er.toString());
 +        if (matcher.matches() && matcher.groupCount() == 1) {
 +          return matcher.group(1);
 +        }
 +
 +        return defaultGroup;
 +      }
 +    };
 +  }
 +}
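
To put the javadoc above into practice, the regex properties and the balancer itself are set as table properties. A short sketch, assuming an existing Connector conn and a hypothetical table named "logs" (the two-digit regex mirrors the javadoc example):

    // Group tablets by the first two digits of the end row; anything that does not
    // match, plus the last tablet, lands in the "other" group.
    conn.tableOperations().setProperty("logs", "table.custom.balancer.group.regex.pattern", "(\\d\\d).*");
    conn.tableOperations().setProperty("logs", "table.custom.balancer.group.regex.default", "other");
    // table.balancer is the per-table load balancer property.
    conn.tableOperations().setProperty("logs", "table.balancer", RegexGroupBalancer.class.getName());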

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
index fada1ad,0000000..2a1fd00
mode 100644,000000..100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/UserImpersonation.java
@@@ -1,228 -1,0 +1,228 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.server.security;
 +
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.commons.lang.StringUtils;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
 + * When SASL is enabled, this parses properties from the site configuration to build up a set of all users capable of impersonating another user, the users
 + * which may be impersonated, and the hosts from which the impersonator may issue requests.
 + *
-  * <code>rpc_user=>{allowed_accumulo_users=[...], allowed_client_hosts=[...]</code>
++ * <code>rpc_user=&gt;{allowed_accumulo_users=[...], allowed_client_hosts=[...]}</code>
 + *
 + * @see Property#INSTANCE_RPC_SASL_PROXYUSERS
 + */
 +public class UserImpersonation {
 +
 +  private static final Logger log = LoggerFactory.getLogger(UserImpersonation.class);
 +  private static final Set<String> ALWAYS_TRUE = new AlwaysTrueSet<>();
 +  private static final String ALL = "*", USERS = "users", HOSTS = "hosts";
 +
 +  public static class AlwaysTrueSet<T> implements Set<T> {
 +
 +    @Override
 +    public int size() {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean isEmpty() {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean contains(Object o) {
 +      return true;
 +    }
 +
 +    @Override
 +    public Iterator<T> iterator() {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public Object[] toArray() {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public <E> E[] toArray(E[] a) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean add(T e) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean remove(Object o) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean containsAll(Collection<?> c) {
 +      return true;
 +    }
 +
 +    @Override
 +    public boolean addAll(Collection<? extends T> c) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean retainAll(Collection<?> c) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public boolean removeAll(Collection<?> c) {
 +      throw new UnsupportedOperationException();
 +    }
 +
 +    @Override
 +    public void clear() {
 +      throw new UnsupportedOperationException();
 +    }
 +  }
 +
 +  public static class UsersWithHosts {
 +    private Set<String> users = new HashSet<>(), hosts = new HashSet<>();
 +    private boolean allUsers, allHosts;
 +
 +    public UsersWithHosts() {
 +      allUsers = allHosts = false;
 +    }
 +
 +    public UsersWithHosts(Set<String> users, Set<String> hosts) {
 +      this();
 +      this.users = users;
 +      this.hosts = hosts;
 +    }
 +
 +    public Set<String> getUsers() {
 +      if (allUsers) {
 +        return ALWAYS_TRUE;
 +      }
 +      return users;
 +    }
 +
 +    public Set<String> getHosts() {
 +      if (allHosts) {
 +        return ALWAYS_TRUE;
 +      }
 +      return hosts;
 +    }
 +
 +    public boolean acceptsAllUsers() {
 +      return allUsers;
 +    }
 +
 +    public void setAcceptAllUsers(boolean allUsers) {
 +      this.allUsers = allUsers;
 +    }
 +
 +    public boolean acceptsAllHosts() {
 +      return allHosts;
 +    }
 +
 +    public void setAcceptAllHosts(boolean allHosts) {
 +      this.allHosts = allHosts;
 +    }
 +
 +    public void setUsers(Set<String> users) {
 +      this.users = users;
 +      allUsers = false;
 +    }
 +
 +    public void setHosts(Set<String> hosts) {
 +      this.hosts = hosts;
 +      allHosts = false;
 +    }
 +  }
 +
 +  private final Map<String,UsersWithHosts> proxyUsers;
 +
 +  public UserImpersonation(AccumuloConfiguration conf) {
 +    Map<String,String> entries = conf.getAllPropertiesWithPrefix(Property.INSTANCE_RPC_SASL_PROXYUSERS);
 +    proxyUsers = new HashMap<>();
 +    final String configKey = Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey();
 +    for (Entry<String,String> entry : entries.entrySet()) {
 +      String aclKey = entry.getKey().substring(configKey.length());
 +      int index = aclKey.lastIndexOf('.');
 +
 +      if (-1 == index) {
 +        throw new RuntimeException("Expected 2 elements in key suffix: " + aclKey);
 +      }
 +
 +      final String remoteUser = aclKey.substring(0, index).trim(), usersOrHosts = aclKey.substring(index + 1).trim();
 +      UsersWithHosts usersWithHosts = proxyUsers.get(remoteUser);
 +      if (null == usersWithHosts) {
 +        usersWithHosts = new UsersWithHosts();
 +        proxyUsers.put(remoteUser, usersWithHosts);
 +      }
 +
 +      if (USERS.equals(usersOrHosts)) {
 +        String userString = entry.getValue().trim();
 +        if (ALL.equals(userString)) {
 +          usersWithHosts.setAcceptAllUsers(true);
 +        } else if (!usersWithHosts.acceptsAllUsers()) {
 +          Set<String> users = usersWithHosts.getUsers();
 +          if (null == users) {
 +            users = new HashSet<>();
 +            usersWithHosts.setUsers(users);
 +          }
 +          String[] userValues = StringUtils.split(userString, ',');
 +          users.addAll(Arrays.<String> asList(userValues));
 +        }
 +      } else if (HOSTS.equals(usersOrHosts)) {
 +        String hostsString = entry.getValue().trim();
 +        if (ALL.equals(hostsString)) {
 +          usersWithHosts.setAcceptAllHosts(true);
 +        } else if (!usersWithHosts.acceptsAllHosts()) {
 +          Set<String> hosts = usersWithHosts.getHosts();
 +          if (null == hosts) {
 +            hosts = new HashSet<>();
 +            usersWithHosts.setHosts(hosts);
 +          }
 +          String[] hostValues = StringUtils.split(hostsString, ',');
 +          hosts.addAll(Arrays.<String> asList(hostValues));
 +        }
 +      } else {
 +        log.debug("Ignoring key " + aclKey);
 +      }
 +    }
 +  }
 +
 +  public UsersWithHosts get(String remoteUser) {
 +    return proxyUsers.get(remoteUser);
 +  }
 +
 +}
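
The property format parsed by the constructor above can be exercised directly. A minimal sketch, assuming ConfigurationCopy from org.apache.accumulo.core.conf provides an empty configuration to write into; the proxy user "proxyclient" and the user names are illustrative:

    ConfigurationCopy conf = new ConfigurationCopy();
    String prefix = Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey();
    // proxyclient may impersonate alice and bob, from any client host.
    conf.set(prefix + "proxyclient.users", "alice,bob");
    conf.set(prefix + "proxyclient.hosts", "*");

    UserImpersonation impersonation = new UserImpersonation(conf);
    UserImpersonation.UsersWithHosts uwh = impersonation.get("proxyclient");
    boolean canImpersonateAlice = uwh.getUsers().contains("alice"); // true
    boolean fromAnyHost = uwh.acceptsAllHosts();                    // true, because of "*"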

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
----------------------------------------------------------------------
diff --cc server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
index 1af908b,a4c5fd6..274ec76
--- a/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/security/SystemCredentialsTest.java
@@@ -56,8 -53,8 +56,8 @@@ public class SystemCredentialsTest 
    }
  
    /**
 -   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(Instance, Credentials)} is kept up-to-date if we move the
 -   * {@link SystemToken}<br>
 +   * This is a test to ensure the string literal in {@link ConnectorImpl#ConnectorImpl(org.apache.accumulo.core.client.impl.ClientContext)} is kept up-to-date
-    * if we move the {@link SystemToken}<br/>
++   * if we move the {@link SystemToken}<br>
     * This check will not be needed after ACCUMULO-1578
     */
    @Test

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
----------------------------------------------------------------------
diff --cc server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
index e30e9ac,0000000..f24da7e
mode 100644,000000..100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
@@@ -1,227 -1,0 +1,227 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.master.replication;
 +
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.conf.AccumuloConfiguration;
 +import org.apache.accumulo.core.replication.ReplicationConstants;
 +import org.apache.accumulo.core.replication.ReplicationTarget;
 +import org.apache.accumulo.core.zookeeper.ZooUtil;
 +import org.apache.accumulo.server.replication.DistributedWorkQueueWorkAssignerHelper;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.zookeeper.KeeperException;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +/**
-  * Creates work in ZK which is <code>filename.serialized_ReplicationTarget => filename</code>, but replicates files in the order in which they were created.
++ * Creates work in ZK which is <code>filename.serialized_ReplicationTarget =&gt; filename</code>, but replicates files in the order in which they were created.
 + * <p>
 + * The intent is to ensure that WALs are replayed in the same order on the peer in which they were applied on the primary.
 + */
 +public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
 +  private static final Logger log = LoggerFactory.getLogger(SequentialWorkAssigner.class);
 +  private static final String NAME = "Sequential Work Assigner";
 +
 +  // @formatter:off
 +  /*
 +   * {
 +   *    peer1 => {sourceTableId1 => work_queue_key1, sourceTableId2 => work_queue_key2, ...}
 +   *    peer2 => {sourceTableId1 => work_queue_key1, sourceTableId3 => work_queue_key4, ...}
 +   *    ...
 +   * }
 +   */
 +  // @formatter:on
 +  private Map<String,Map<String,String>> queuedWorkByPeerName;
 +
 +  public SequentialWorkAssigner() {}
 +
 +  public SequentialWorkAssigner(AccumuloConfiguration conf, Connector conn) {
 +    configure(conf, conn);
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return NAME;
 +  }
 +
 +  protected Map<String,Map<String,String>> getQueuedWork() {
 +    return queuedWorkByPeerName;
 +  }
 +
 +  protected void setQueuedWork(Map<String,Map<String,String>> queuedWork) {
 +    this.queuedWorkByPeerName = queuedWork;
 +  }
 +
 +  /**
 +   * Initialize the queued work map with the work already sent out.
 +   */
 +  @Override
 +  protected void initializeQueuedWork() {
 +    if (null != queuedWorkByPeerName) {
 +      return;
 +    }
 +
 +    queuedWorkByPeerName = new HashMap<>();
 +    List<String> existingWork;
 +    try {
 +      existingWork = workQueue.getWorkQueued();
 +    } catch (KeeperException | InterruptedException e) {
 +      throw new RuntimeException("Error reading existing queued replication work", e);
 +    }
 +
 +    log.info("Restoring replication work queue state from zookeeper");
 +
 +    for (String work : existingWork) {
 +      Entry<String,ReplicationTarget> entry = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(work);
 +      String filename = entry.getKey();
 +      String peerName = entry.getValue().getPeerName();
 +      String sourceTableId = entry.getValue().getSourceTableId();
 +
 +      log.debug("In progress replication of {} from table with ID {} to peer {}", filename, sourceTableId, peerName);
 +
 +      Map<String,String> replicationForPeer = queuedWorkByPeerName.get(peerName);
 +      if (null == replicationForPeer) {
 +        replicationForPeer = new HashMap<>();
 +        queuedWorkByPeerName.put(peerName, replicationForPeer);
 +      }
 +
 +      replicationForPeer.put(sourceTableId, work);
 +    }
 +  }
 +
 +  /**
 +   * Iterate over the queued work to remove entries that have been completed.
 +   */
 +  @Override
 +  protected void cleanupFinishedWork() {
 +    final Iterator<Entry<String,Map<String,String>>> queuedWork = queuedWorkByPeerName.entrySet().iterator();
 +    final String instanceId = conn.getInstance().getInstanceID();
 +
 +    int elementsRemoved = 0;
 +    // Check the status of all the work we've queued up
 +    while (queuedWork.hasNext()) {
 +      // {peer -> {tableId -> workKey, tableId -> workKey, ... }, peer -> ...}
 +      Entry<String,Map<String,String>> workForPeer = queuedWork.next();
 +
 +      // TableID to workKey (filename and ReplicationTarget)
 +      Map<String,String> queuedReplication = workForPeer.getValue();
 +
 +      Iterator<Entry<String,String>> iter = queuedReplication.entrySet().iterator();
 +      // Loop over every target we need to replicate this file to, removing the target when
 +      // the replication task has finished
 +      while (iter.hasNext()) {
 +        // tableID -> workKey
 +        Entry<String,String> entry = iter.next();
 +        // Null equates to the work for this target was finished
 +        if (null == zooCache.get(ZooUtil.getRoot(instanceId) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + entry.getValue())) {
 +          log.debug("Removing {} from work assignment state", entry.getValue());
 +          iter.remove();
 +          elementsRemoved++;
 +        }
 +      }
 +    }
 +
 +    log.info("Removed {} elements from internal workqueue state because the work was complete", elementsRemoved);
 +  }
 +
 +  @Override
 +  protected int getQueueSize() {
 +    return queuedWorkByPeerName.size();
 +  }
 +
 +  @Override
 +  protected boolean shouldQueueWork(ReplicationTarget target) {
 +    Map<String,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
 +    if (null == queuedWorkForPeer) {
 +      return true;
 +    }
 +
 +    String queuedWork = queuedWorkForPeer.get(target.getSourceTableId());
 +
 +    // If we have no work for the local table to the given peer, submit some!
 +    return null == queuedWork;
 +  }
 +
 +  @Override
 +  protected boolean queueWork(Path path, ReplicationTarget target) {
 +    String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey(path.getName(), target);
 +    Map<String,String> workForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
 +    if (null == workForPeer) {
 +      workForPeer = new HashMap<>();
 +      this.queuedWorkByPeerName.put(target.getPeerName(), workForPeer);
 +    }
 +
 +    String queuedWork = workForPeer.get(target.getSourceTableId());
 +    if (null == queuedWork) {
 +      try {
 +        workQueue.addWork(queueKey, path.toString());
 +        workForPeer.put(target.getSourceTableId(), queueKey);
 +      } catch (KeeperException | InterruptedException e) {
 +        log.warn("Could not queue work for {} to {}", path, target, e);
 +        return false;
 +      }
 +
 +      return true;
 +    } else if (queuedWork.startsWith(path.getName())) {
 +      log.debug("Not re-queueing work for {} as it has already been queued for replication to {}", path, target);
 +      return false;
 +    } else {
 +      log.debug("Not queueing {} for work as {} must be replicated to {} first", path, queuedWork, target.getPeerName());
 +      return false;
 +    }
 +  }
 +
 +  @Override
 +  protected Set<String> getQueuedWork(ReplicationTarget target) {
 +    Map<String,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
 +    if (null == queuedWorkForPeer) {
 +      return Collections.emptySet();
 +    }
 +
 +    String queuedWork = queuedWorkForPeer.get(target.getSourceTableId());
 +    if (null == queuedWork) {
 +      return Collections.emptySet();
 +    } else {
 +      return Collections.singleton(queuedWork);
 +    }
 +  }
 +
 +  @Override
 +  protected void removeQueuedWork(ReplicationTarget target, String queueKey) {
 +    Map<String,String> queuedWorkForPeer = this.queuedWorkByPeerName.get(target.getPeerName());
 +    if (null == queuedWorkForPeer) {
 +      log.warn("removeQueuedWork called when no work was queued for {}", target.getPeerName());
 +      return;
 +    }
 +
 +    String queuedWork = queuedWorkForPeer.get(target.getSourceTableId());
 +    if (queueKey.equals(queuedWork)) {
 +      queuedWorkForPeer.remove(target.getSourceTableId());
 +    } else {
 +      log.warn("removeQueuedWork called on {} with differing queueKeys, expected {} but was {}", target, queueKey, queuedWork);
 +      return;
 +    }
 +  }
 +}
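
The queue keys this class stores in ZooKeeper come from DistributedWorkQueueWorkAssignerHelper, the same helper used by initializeQueuedWork() and queueWork() above. A sketch of the round trip, assuming ReplicationTarget's (peerName, remoteIdentifier, sourceTableId) constructor and an illustrative WAL file name:

    // Build the key stored in the ZK work queue for one file/target pair.
    ReplicationTarget target = new ReplicationTarget("peer1", "remoteTableId", "2");
    String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey("wal-0001", target);

    // fromQueueKey() recovers the filename and target, just as initializeQueuedWork()
    // does when restoring state from ZooKeeper after a restart.
    Entry<String,ReplicationTarget> parsed = DistributedWorkQueueWorkAssignerHelper.fromQueueKey(queueKey);
    String filename = parsed.getKey();                 // "wal-0001"
    String peerName = parsed.getValue().getPeerName(); // "peer1"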

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6becfbd3/server/monitor/src/main/java/org/apache/accumulo/monitor/servlets/DefaultServlet.java
----------------------------------------------------------------------