Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/01/09 03:44:38 UTC

[34/66] [abbrv] accumulo git commit: ACCUMULO-3451 Format master branch (1.7.0-SNAPSHOT)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
index 8d56889..5d1a0c7 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RegExFilter.java
@@ -37,9 +37,9 @@ import org.apache.log4j.Logger;
  * A Filter that matches entries based on Java regular expressions.
  */
 public class RegExFilter extends Filter {
-  
+
   private static final Logger log = Logger.getLogger(RegExFilter.class);
-  
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     RegExFilter result = (RegExFilter) super.deepCopy(env);
@@ -50,7 +50,7 @@ public class RegExFilter extends Filter {
     result.orFields = orFields;
     return result;
   }
-  
+
   public static final String ROW_REGEX = "rowRegex";
   public static final String COLF_REGEX = "colfRegex";
   public static final String COLQ_REGEX = "colqRegex";
@@ -58,25 +58,25 @@ public class RegExFilter extends Filter {
   public static final String OR_FIELDS = "orFields";
   public static final String ENCODING = "encoding";
   public static final String MATCH_SUBSTRING = "matchSubstring";
-  
+
   public static final String ENCODING_DEFAULT = UTF_8.name();
-  
+
   private Matcher rowMatcher;
   private Matcher colfMatcher;
   private Matcher colqMatcher;
   private Matcher valueMatcher;
   private boolean orFields = false;
   private boolean matchSubstring = false;
-  
+
   private String encoding = ENCODING_DEFAULT;
-  
+
   private Matcher copyMatcher(Matcher m) {
     if (m == null)
       return m;
     else
       return m.pattern().matcher("");
   }
-  
+
   private boolean matches(Matcher matcher, ByteSequence bs) {
     if (matcher != null) {
       try {
@@ -88,7 +88,7 @@ public class RegExFilter extends Filter {
     }
     return !orFields;
   }
-  
+
   private boolean matches(Matcher matcher, byte data[], int offset, int len) {
     if (matcher != null) {
       try {
@@ -100,7 +100,7 @@ public class RegExFilter extends Filter {
     }
     return !orFields;
   }
-  
+
   @Override
   public boolean accept(Key key, Value value) {
     if (orFields)
@@ -111,7 +111,7 @@ public class RegExFilter extends Filter {
         && (matches(colfMatcher, colfMatcher == null ? null : key.getColumnFamilyData()))
         && (matches(colqMatcher, colqMatcher == null ? null : key.getColumnQualifierData())) && (matches(valueMatcher, value.get(), 0, value.get().length)));
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
@@ -120,42 +120,42 @@ public class RegExFilter extends Filter {
     } else {
       rowMatcher = null;
     }
-    
+
     if (options.containsKey(COLF_REGEX)) {
       colfMatcher = Pattern.compile(options.get(COLF_REGEX)).matcher("");
     } else {
       colfMatcher = null;
     }
-    
+
     if (options.containsKey(COLQ_REGEX)) {
       colqMatcher = Pattern.compile(options.get(COLQ_REGEX)).matcher("");
     } else {
       colqMatcher = null;
     }
-    
+
     if (options.containsKey(VALUE_REGEX)) {
       valueMatcher = Pattern.compile(options.get(VALUE_REGEX)).matcher("");
     } else {
       valueMatcher = null;
     }
-    
+
     if (options.containsKey(OR_FIELDS)) {
       orFields = Boolean.parseBoolean(options.get(OR_FIELDS));
     } else {
       orFields = false;
     }
-    
+
     if (options.containsKey(MATCH_SUBSTRING)) {
       matchSubstring = Boolean.parseBoolean(options.get(MATCH_SUBSTRING));
     } else {
       matchSubstring = false;
     }
-    
+
     if (options.containsKey(ENCODING)) {
       encoding = options.get(ENCODING);
     }
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
@@ -170,28 +170,28 @@ public class RegExFilter extends Filter {
     io.addNamedOption(RegExFilter.ENCODING, "character encoding of byte array value (default is " + ENCODING_DEFAULT + ")");
     return io;
   }
-  
+
   @Override
   public boolean validateOptions(Map<String,String> options) {
     if (super.validateOptions(options) == false)
       return false;
-    
+
     try {
       if (options.containsKey(ROW_REGEX))
         Pattern.compile(options.get(ROW_REGEX)).matcher("");
-      
+
       if (options.containsKey(COLF_REGEX))
         Pattern.compile(options.get(COLF_REGEX)).matcher("");
-      
+
       if (options.containsKey(COLQ_REGEX))
         Pattern.compile(options.get(COLQ_REGEX)).matcher("");
-      
+
       if (options.containsKey(VALUE_REGEX))
         Pattern.compile(options.get(VALUE_REGEX)).matcher("");
     } catch (Exception e) {
       throw new IllegalArgumentException("bad regex", e);
     }
-    
+
     if (options.containsKey(ENCODING)) {
       try {
         this.encoding = options.get(ENCODING);
@@ -202,14 +202,14 @@ public class RegExFilter extends Filter {
         throw new IllegalArgumentException("invalid encoding " + ENCODING + ":" + this.encoding, e);
       }
     }
-    
+
     return true;
   }
-  
+
   /**
    * Encode the terms to match against in the iterator. Same as calling {@link #setRegexs(IteratorSetting, String, String, String, String, boolean, boolean)}
    * with matchSubstring set to false
-   * 
+   *
    * @param si
    *          ScanIterator config to be updated
    * @param rowTerm
@@ -226,10 +226,10 @@ public class RegExFilter extends Filter {
   public static void setRegexs(IteratorSetting si, String rowTerm, String cfTerm, String cqTerm, String valueTerm, boolean orFields) {
     setRegexs(si, rowTerm, cfTerm, cqTerm, valueTerm, orFields, false);
   }
-  
+
   /**
    * Encode the terms to match against in the iterator
-   * 
+   *
    * @param si
    *          ScanIterator config to be updated
    * @param rowTerm
@@ -244,7 +244,7 @@ public class RegExFilter extends Filter {
    *          if true then search expressions will match on partial strings
    */
   public static void setRegexs(IteratorSetting si, String rowTerm, String cfTerm, String cqTerm, String valueTerm, boolean orFields, boolean matchSubstring) {
-    
+
     if (rowTerm != null)
       si.addOption(RegExFilter.ROW_REGEX, rowTerm);
     if (cfTerm != null)
@@ -255,17 +255,17 @@ public class RegExFilter extends Filter {
       si.addOption(RegExFilter.VALUE_REGEX, valueTerm);
     si.addOption(RegExFilter.OR_FIELDS, String.valueOf(orFields));
     si.addOption(RegExFilter.MATCH_SUBSTRING, String.valueOf(matchSubstring));
-    
+
   }
-  
+
   /**
    * Set the encoding string to use when interpreting characters
-   * 
+   *
    * @param si
    *          ScanIterator config to be updated
    * @param encoding
    *          the encoding string to use for character interpretation.
-   * 
+   *
    */
   public static void setEncoding(IteratorSetting si, String encoding) {
     if (!encoding.isEmpty()) {

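[Editor's note, not part of the commit: the RegExFilter hunks above are whitespace-only. For context, a minimal sketch of how the setRegexs helper touched here is typically wired onto a scanner; the connector, table name, priority, and patterns below are illustrative placeholders.]

    import java.util.Map.Entry;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.RegExFilter;
    import org.apache.accumulo.core.security.Authorizations;

    public class RegExFilterExample {
      static void scanMatching(Connector conn) throws TableNotFoundException {
        Scanner scanner = conn.createScanner("mytable", Authorizations.EMPTY);
        IteratorSetting cfg = new IteratorSetting(30, "regex", RegExFilter.class);
        // Keep entries whose row matches "user.*" OR whose value contains "error"
        // (orFields = true means any one matching field accepts the entry).
        RegExFilter.setRegexs(cfg, "user.*", null, null, ".*error.*", true);
        scanner.addScanIterator(cfg);
        for (Entry<Key,Value> e : scanner)
          System.out.println(e.getKey() + " -> " + e.getValue());
      }
    }
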
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/ReqVisFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/ReqVisFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/ReqVisFilter.java
index 754b2c3..d7f85f5 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/ReqVisFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/ReqVisFilter.java
@@ -25,13 +25,13 @@ import org.apache.accumulo.core.security.ColumnVisibility;
  * A Filter that matches entries with a non-empty ColumnVisibility.
  */
 public class ReqVisFilter extends Filter {
-  
+
   @Override
   public boolean accept(Key k, Value v) {
     ColumnVisibility vis = new ColumnVisibility(k.getColumnVisibility());
     return vis.getExpression().length > 0;
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/RowDeletingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowDeletingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowDeletingIterator.java
index 3c24f0e..60870d8 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowDeletingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowDeletingIterator.java
@@ -34,85 +34,85 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
 /**
  * An iterator for deleting whole rows.
- * 
+ *
  * After setting this iterator up for your table, to delete a row insert a row with empty column family, empty column qualifier, empty column visibility, and a
  * value of DEL_ROW. Do not use empty columns for anything else when using this iterator.
- * 
+ *
  * When using this iterator the locality group containing the row deletes will always be read. The locality group containing the empty column family will
  * contain row deletes. Always reading this locality group can have an impact on performance.
- * 
+ *
  * For example assume there are two locality groups, one containing large images and one containing small metadata about the images. If row deletes are in the
  * same locality group as the images, then this will significantly slow down scans and major compactions that are only reading the metadata locality group.
  * Therefore, you would want to put the empty column family in the locality group that contains the metadata. Another option is to put the empty column in its
  * own locality group. Which is best depends on your data.
- * 
+ *
  */
 
 public class RowDeletingIterator implements SortedKeyValueIterator<Key,Value> {
-  
+
   public static final Value DELETE_ROW_VALUE = new Value("DEL_ROW".getBytes(UTF_8));
   private SortedKeyValueIterator<Key,Value> source;
   private boolean propogateDeletes;
   private ByteSequence currentRow;
   private boolean currentRowDeleted;
   private long deleteTS;
-  
+
   private boolean dropEmptyColFams;
-  
+
   private static final ByteSequence EMPTY = new ArrayByteSequence(new byte[] {});
-  
+
   private RowDeletingIterator(SortedKeyValueIterator<Key,Value> source, boolean propogateDeletes2) {
     this.source = source;
     this.propogateDeletes = propogateDeletes2;
   }
-  
+
   public RowDeletingIterator() {}
-  
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     return new RowDeletingIterator(source.deepCopy(env), propogateDeletes);
   }
-  
+
   @Override
   public Key getTopKey() {
     return source.getTopKey();
   }
-  
+
   @Override
   public Value getTopValue() {
     return source.getTopValue();
   }
-  
+
   @Override
   public boolean hasTop() {
     return source.hasTop();
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     this.source = source;
     this.propogateDeletes = (env.getIteratorScope() == IteratorScope.majc && !env.isFullMajorCompaction()) || env.getIteratorScope() == IteratorScope.minc;
   }
-  
+
   @Override
   public void next() throws IOException {
     source.next();
     consumeDeleted();
     consumeEmptyColFams();
   }
-  
+
   private void consumeEmptyColFams() throws IOException {
     while (dropEmptyColFams && source.hasTop() && source.getTopKey().getColumnFamilyData().length() == 0) {
       source.next();
       consumeDeleted();
     }
   }
-  
+
   private boolean isDeleteMarker(Key key, Value val) {
     return key.getColumnFamilyData().length() == 0 && key.getColumnQualifierData().length() == 0 && key.getColumnVisibilityData().length() == 0
         && val.equals(DELETE_ROW_VALUE);
   }
-  
+
   private void consumeDeleted() throws IOException {
     // this method tries to do as little work as possible when nothing is deleted
     while (source.hasTop()) {
@@ -120,29 +120,29 @@ public class RowDeletingIterator implements SortedKeyValueIterator<Key,Value> {
         while (source.hasTop() && currentRow.equals(source.getTopKey().getRowData()) && source.getTopKey().getTimestamp() <= deleteTS) {
           source.next();
         }
-        
+
         if (source.hasTop() && !currentRow.equals(source.getTopKey().getRowData())) {
           currentRowDeleted = false;
         }
       }
-      
+
       if (!currentRowDeleted && source.hasTop() && isDeleteMarker(source.getTopKey(), source.getTopValue())) {
         currentRow = source.getTopKey().getRowData();
         currentRowDeleted = true;
         deleteTS = source.getTopKey().getTimestamp();
-        
+
         if (propogateDeletes)
           break;
       } else {
         break;
       }
     }
-    
+
   }
-  
+
   @Override
   public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-    
+
     if (inclusive && !columnFamilies.contains(EMPTY)) {
       columnFamilies = new HashSet<ByteSequence>(columnFamilies);
       columnFamilies.add(EMPTY);
@@ -154,16 +154,16 @@ public class RowDeletingIterator implements SortedKeyValueIterator<Key,Value> {
     } else {
       dropEmptyColFams = false;
     }
-    
+
     currentRowDeleted = false;
-    
+
     if (range.getStartKey() != null) {
       // seek to beginning of row
       Range newRange = new Range(new Key(range.getStartKey().getRow()), true, range.getEndKey(), range.isEndKeyInclusive());
       source.seek(newRange, columnFamilies, inclusive);
       consumeDeleted();
       consumeEmptyColFams();
-      
+
       if (source.hasTop() && range.beforeStartKey(source.getTopKey())) {
         source.seek(range, columnFamilies, inclusive);
         consumeDeleted();
@@ -174,7 +174,7 @@ public class RowDeletingIterator implements SortedKeyValueIterator<Key,Value> {
       consumeDeleted();
       consumeEmptyColFams();
     }
-    
+
   }
-  
+
 }

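[Editor's note, not part of the commit: the RowDeletingIterator changes are formatting-only. The delete protocol its javadoc describes (empty column family, qualifier, and visibility plus a DEL_ROW value) looks roughly like the sketch below; the priority, iterator name, and table are placeholders, and attaching the iterator is a one-time setup step.]

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
    import org.apache.hadoop.io.Text;

    public class RowDeleteExample {
      static void deleteRow(Connector conn, String table, String row) throws Exception {
        // One-time setup: attach the iterator to the table.
        conn.tableOperations().attachIterator(table,
            new IteratorSetting(30, "rowdel", RowDeletingIterator.class));

        // Insert the marker the javadoc describes: empty column family, qualifier,
        // and visibility with a value of DEL_ROW.
        BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
        Mutation m = new Mutation(new Text(row));
        m.put(new Text(""), new Text(""), RowDeletingIterator.DELETE_ROW_VALUE);
        bw.addMutation(m);
        bw.close();
      }
    }
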
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
index ce38b08..ef4003c 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
@@ -33,23 +33,23 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.hadoop.io.Text;
 
 /**
- * 
+ *
  * The RowEncodingIterator is designed to provide row-isolation so that queries see mutations as atomic. It does so by encapsulating an entire row of key/value
  * pairs into a single key/value pair, which is returned through the client as an atomic operation. This is an abstract class, allowing the user to implement
  * rowEncoder and rowDecoder such that the columns and values of a given row may be encoded in a format best suited to the client.
- * 
+ *
  * <p>
  * For an example implementation, see {@link WholeRowIterator}.
- * 
+ *
  * <p>
  * One caveat is that when seeking in the WholeRowIterator using a range that starts at a non-inclusive first key in a row, (e.g. seek(new Range(new Key(new
  * Text("row")),false,...),...)) this iterator will skip to the next row. This is done in order to prevent repeated scanning of the same row when system
  * automatically creates ranges of that form, which happens in the case of the client calling continueScan, or in the case of the tablet server continuing a
  * scan after swapping out sources.
- * 
+ *
  * <p>
  * To regain the original key/value pairs of the row, call the rowDecoder function on the key/value pair that this iterator returned.
- * 
+ *
  * @see RowFilter
  */
 public abstract class RowEncodingIterator implements SortedKeyValueIterator<Key,Value> {
@@ -101,7 +101,7 @@ public abstract class RowEncodingIterator implements SortedKeyValueIterator<Key,
   }
 
   /**
-   * 
+   *
    * @param currentRow
    *          All keys have this in their row portion (do not modify!).
    * @param keys

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/RowFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowFilter.java
index 9c4edc2..1287b81 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowFilter.java
@@ -34,21 +34,21 @@ import org.apache.hadoop.io.Text;
 /**
  * This iterator makes it easy to select rows that meet a given criteria. Its an alternative to the {@link WholeRowIterator}. There are a few things to consider
  * when deciding which one to use.
- * 
+ *
  * First the WholeRowIterator requires that the row fit in memory and that the entire row is read before a decision is made. This iterator has neither
  * requirement, it allows seeking within a row to avoid reading the entire row to make a decision. So even if your rows fit into memory, this extending this
  * iterator may be better choice because you can seek.
- * 
+ *
  * Second the WholeRowIterator is currently the only way to achieve row isolation with the {@link BatchScanner}. With the normal {@link Scanner} row isolation
  * can be enabled and this Iterator may be used.
- * 
+ *
  * Third the row acceptance test will be executed every time this Iterator is seeked. If the row is large, then the row will fetched in batches of key/values.
  * As each batch is fetched the test may be re-executed because the iterator stack is reseeked for each batch. The batch size may be increased to reduce the
  * number of times the test is executed. With the normal Scanner, if isolation is enabled then it will read an entire row w/o seeking this iterator.
- * 
+ *
  */
 public abstract class RowFilter extends WrappingIterator {
-  
+
   private RowIterator decisionIterator;
   private Collection<ByteSequence> columnFamilies;
   Text currentRow;
@@ -59,23 +59,23 @@ public abstract class RowFilter extends WrappingIterator {
   private static class RowIterator extends WrappingIterator {
     private Range rowRange;
     private boolean hasTop;
-    
+
     RowIterator(SortedKeyValueIterator<Key,Value> source) {
       super.setSource(source);
     }
-    
+
     void setRow(Range row) {
       this.rowRange = row;
     }
-    
+
     @Override
     public boolean hasTop() {
       return hasTop && super.hasTop();
     }
-    
+
     @Override
     public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
-      
+
       range = rowRange.clip(range, true);
       if (range == null) {
         hasTop = false;
@@ -90,14 +90,14 @@ public abstract class RowFilter extends WrappingIterator {
     SortedKeyValueIterator<Key,Value> source = getSource();
     while (source.hasTop()) {
       Text row = source.getTopKey().getRow();
-      
+
       if (currentRow != null && currentRow.equals(row))
         break;
-      
+
       Range rowRange = new Range(row);
       decisionIterator.setRow(rowRange);
       decisionIterator.seek(rowRange, columnFamilies, inclusive);
-      
+
       if (acceptRow(decisionIterator)) {
         currentRow = row;
         break;
@@ -108,7 +108,7 @@ public abstract class RowFilter extends WrappingIterator {
           count++;
           source.next();
         }
-        
+
         if (source.hasTop() && source.getTopKey().getRow().equals(row)) {
           Range nextRow = new Range(row, false, null, false);
           nextRow = range.clip(nextRow, true);
@@ -120,11 +120,11 @@ public abstract class RowFilter extends WrappingIterator {
       }
     }
   }
-  
+
   /**
    * Implementation should return false to suppress a row.
-   * 
-   * 
+   *
+   *
    * @param rowIterator
    *          - An iterator over the row. This iterator is confined to the row. Seeking past the end of the row will return no data. Seeking before the row will
    *          always set top to the first column in the current row. By default this iterator will only see the columns the parent was seeked with. To see more
@@ -156,13 +156,13 @@ public abstract class RowFilter extends WrappingIterator {
   public boolean hasTop() {
     return hasTop && super.hasTop();
   }
-  
+
   @Override
   public void next() throws IOException {
     super.next();
     skipRows();
   }
-  
+
   @Override
   public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
     super.seek(range, columnFamilies, inclusive);
@@ -172,6 +172,6 @@ public abstract class RowFilter extends WrappingIterator {
     currentRow = null;
     hasTop = true;
     skipRows();
-    
+
   }
 }

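[Editor's note, not part of the commit: the RowFilter hunks are likewise whitespace-only. Since the class is meant to be extended, here is a minimal, hypothetical subclass that keeps only rows containing a "meta" column family; as the javadoc above notes, acceptRow only sees the columns the parent was seeked with unless it seeks for more.]

    import java.io.IOException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
    import org.apache.accumulo.core.iterators.user.RowFilter;

    public class HasMetaColumnFilter extends RowFilter {
      @Override
      public boolean acceptRow(SortedKeyValueIterator<Key,Value> rowIterator) throws IOException {
        // rowIterator is confined to the current row; scan it for the "meta" family.
        while (rowIterator.hasTop()) {
          if (rowIterator.getTopKey().getColumnFamily().toString().equals("meta"))
            return true;
          rowIterator.next();
        }
        return false;
      }
    }
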
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
index 9bdc883..3c9bdd5 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingArrayCombiner.java
@@ -46,10 +46,10 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
   public static final Encoder<List<Long>> FIXED_LONG_ARRAY_ENCODER = new FixedLongArrayEncoder();
   public static final Encoder<List<Long>> VAR_LONG_ARRAY_ENCODER = new VarLongArrayEncoder();
   public static final Encoder<List<Long>> STRING_ARRAY_ENCODER = new StringArrayEncoder();
-  
+
   private static final String TYPE = "type";
   private static final String CLASS_PREFIX = "class:";
-  
+
   public static enum Type {
     /**
      * indicates a variable-length encoding of a list of Longs using {@link SummingArrayCombiner.VarLongArrayEncoder}
@@ -64,7 +64,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
      */
     STRING
   }
-  
+
   @Override
   public List<Long> typedReduce(Key key, Iterator<List<Long>> iter) {
     List<Long> sum = new ArrayList<Long>();
@@ -73,7 +73,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
     }
     return sum;
   }
-  
+
   public static List<Long> arrayAdd(List<Long> la, List<Long> lb) {
     if (la.size() > lb.size()) {
       for (int i = 0; i < lb.size(); i++) {
@@ -87,13 +87,13 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       return lb;
     }
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
     setEncoder(options);
   }
-  
+
   private void setEncoder(Map<String,String> options) {
     String type = options.get(TYPE);
     if (type == null)
@@ -117,7 +117,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       }
     }
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
@@ -126,7 +126,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
     io.addNamedOption(TYPE, "<VARLEN|FIXEDLEN|STRING|fullClassName>");
     return io;
   }
-  
+
   @Override
   public boolean validateOptions(Map<String,String> options) {
     if (super.validateOptions(options) == false)
@@ -138,12 +138,12 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
     }
     return true;
   }
-  
+
   public abstract static class DOSArrayEncoder<V> implements Encoder<List<V>> {
     public abstract void write(DataOutputStream dos, V v) throws IOException;
-    
+
     public abstract V read(DataInputStream dis) throws IOException;
-    
+
     @Override
     public byte[] encode(List<V> vl) {
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -158,7 +158,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       }
       return baos.toByteArray();
     }
-    
+
     @Override
     public List<V> decode(byte[] b) {
       DataInputStream dis = new DataInputStream(new ByteArrayInputStream(b));
@@ -174,31 +174,31 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       }
     }
   }
-  
+
   public static class VarLongArrayEncoder extends DOSArrayEncoder<Long> {
     @Override
     public void write(DataOutputStream dos, Long v) throws IOException {
       WritableUtils.writeVLong(dos, v);
     }
-    
+
     @Override
     public Long read(DataInputStream dis) throws IOException {
       return WritableUtils.readVLong(dis);
     }
   }
-  
+
   public static class FixedLongArrayEncoder extends DOSArrayEncoder<Long> {
     @Override
     public void write(DataOutputStream dos, Long v) throws IOException {
       dos.writeLong(v);
     }
-    
+
     @Override
     public Long read(DataInputStream dis) throws IOException {
       return dis.readLong();
     }
   }
-  
+
   public static class StringArrayEncoder implements Encoder<List<Long>> {
     @Override
     public byte[] encode(List<Long> la) {
@@ -211,7 +211,7 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       }
       return sb.toString().getBytes(UTF_8);
     }
-    
+
     @Override
     public List<Long> decode(byte[] b) {
       String[] longstrs = new String(b, UTF_8).split(",");
@@ -229,10 +229,10 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
       return la;
     }
   }
-  
+
   /**
    * A convenience method for setting the encoding type.
-   * 
+   *
    * @param is
    *          IteratorSetting object to configure.
    * @param type
@@ -241,10 +241,10 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
   public static void setEncodingType(IteratorSetting is, Type type) {
     is.addOption(TYPE, type.toString());
   }
-  
+
   /**
    * A convenience method for setting the encoding type.
-   * 
+   *
    * @param is
    *          IteratorSetting object to configure.
    * @param encoderClass
@@ -253,10 +253,10 @@ public class SummingArrayCombiner extends TypedValueCombiner<List<Long>> {
   public static void setEncodingType(IteratorSetting is, Class<? extends Encoder<List<Long>>> encoderClass) {
     is.addOption(TYPE, CLASS_PREFIX + encoderClass.getName());
   }
-  
+
   /**
    * A convenience method for setting the encoding type.
-   * 
+   *
    * @param is
    *          IteratorSetting object to configure.
    * @param encoderClassName

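[Editor's note, not part of the commit: the SummingArrayCombiner changes are formatting-only. A typical configuration using the setEncodingType helper touched above might look like the sketch below; the column family, priority, and table are illustrative.]

    import java.util.Collections;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.Combiner;
    import org.apache.accumulo.core.iterators.user.SummingArrayCombiner;

    public class SummingArrayExample {
      static void configure(Connector conn, String table) throws Exception {
        IteratorSetting cfg = new IteratorSetting(20, "sumarray", SummingArrayCombiner.class);
        // Combine only the "stats" column family; values are lists of longs summed element-wise.
        Combiner.setColumns(cfg, Collections.singletonList(new IteratorSetting.Column("stats")));
        // VARLEN selects the VarLongArrayEncoder defined above.
        SummingArrayCombiner.setEncodingType(cfg, SummingArrayCombiner.Type.VARLEN);
        conn.tableOperations().attachIterator(table, cfg);
      }
    }
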
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
index e2c5f65..68550b5 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/SummingCombiner.java
@@ -34,7 +34,7 @@ public class SummingCombiner extends LongCombiner {
     }
     return sum;
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
index 8747aa6..3cfbf5b 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TimestampFilter.java
@@ -35,13 +35,13 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 public class TimestampFilter extends Filter {
   private static final String LONG_PREFIX = "LONG";
   private final SimpleDateFormat dateParser = initDateParser();
-  
+
   private static SimpleDateFormat initDateParser() {
     SimpleDateFormat dateParser = new SimpleDateFormat("yyyyMMddHHmmssz");
     dateParser.setTimeZone(TimeZone.getTimeZone("GMT"));
     return dateParser;
   }
-  
+
   public static final String START = "start";
   public static final String START_INCL = "startInclusive";
   public static final String END = "end";
@@ -52,9 +52,9 @@ public class TimestampFilter extends Filter {
   private boolean endInclusive;
   private boolean hasStart;
   private boolean hasEnd;
-  
+
   public TimestampFilter() {}
-  
+
   @Override
   public boolean accept(Key k, Value v) {
     long ts = k.getTimestamp();
@@ -66,26 +66,26 @@ public class TimestampFilter extends Filter {
       return false;
     return true;
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     if (options == null)
       throw new IllegalArgumentException("start and/or end must be set for " + TimestampFilter.class.getName());
-    
+
     super.init(source, options, env);
-    
+
     hasStart = false;
     hasEnd = false;
     startInclusive = true;
     endInclusive = true;
-    
+
     if (options.containsKey(START))
       hasStart = true;
     if (options.containsKey(END))
       hasEnd = true;
     if (!hasStart && !hasEnd)
       throw new IllegalArgumentException("must have either start or end for " + TimestampFilter.class.getName());
-    
+
     try {
       if (hasStart) {
         String s = options.get(START);
@@ -109,7 +109,7 @@ public class TimestampFilter extends Filter {
     if (options.get(END_INCL) != null)
       endInclusive = Boolean.parseBoolean(options.get(END_INCL));
   }
-  
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     TimestampFilter copy = (TimestampFilter) super.deepCopy(env);
@@ -121,7 +121,7 @@ public class TimestampFilter extends Filter {
     copy.endInclusive = endInclusive;
     return copy;
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
@@ -133,7 +133,7 @@ public class TimestampFilter extends Filter {
     io.addNamedOption("endInclusive", "true or false");
     return io;
   }
-  
+
   @Override
   public boolean validateOptions(Map<String,String> options) {
     if (super.validateOptions(options) == false)
@@ -168,10 +168,10 @@ public class TimestampFilter extends Filter {
     }
     return true;
   }
-  
+
   /**
    * A convenience method for setting the range of timestamps accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -182,10 +182,10 @@ public class TimestampFilter extends Filter {
   public static void setRange(IteratorSetting is, String start, String end) {
     setRange(is, start, true, end, true);
   }
-  
+
   /**
    * A convenience method for setting the range of timestamps accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -201,10 +201,10 @@ public class TimestampFilter extends Filter {
     setStart(is, start, startInclusive);
     setEnd(is, end, endInclusive);
   }
-  
+
   /**
    * A convenience method for setting the start timestamp accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -221,10 +221,10 @@ public class TimestampFilter extends Filter {
       throw new IllegalArgumentException("couldn't parse " + start);
     }
   }
-  
+
   /**
    * A convenience method for setting the end timestamp accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param end
@@ -241,10 +241,10 @@ public class TimestampFilter extends Filter {
       throw new IllegalArgumentException("couldn't parse " + end);
     }
   }
-  
+
   /**
    * A convenience method for setting the range of timestamps accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -255,10 +255,10 @@ public class TimestampFilter extends Filter {
   public static void setRange(IteratorSetting is, long start, long end) {
     setRange(is, start, true, end, true);
   }
-  
+
   /**
    * A convenience method for setting the range of timestamps accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -274,10 +274,10 @@ public class TimestampFilter extends Filter {
     setStart(is, start, startInclusive);
     setEnd(is, end, endInclusive);
   }
-  
+
   /**
    * A convenience method for setting the start timestamp accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param start
@@ -289,10 +289,10 @@ public class TimestampFilter extends Filter {
     is.addOption(START, LONG_PREFIX + Long.toString(start));
     is.addOption(START_INCL, Boolean.toString(startInclusive));
   }
-  
+
   /**
    * A convenience method for setting the end timestamp accepted by the timestamp filter.
-   * 
+   *
    * @param is
    *          the iterator setting object to configure
    * @param end

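[Editor's note, not part of the commit: the TimestampFilter hunks are whitespace-only. Its string-based setRange helper, which parses dates with the yyyyMMddHHmmssz GMT format set up in initDateParser above, is typically applied to a scanner as in this sketch; the window and priority are chosen for illustration.]

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.TimestampFilter;

    public class TimestampFilterExample {
      static void restrictToJan2015(Scanner scanner) {
        IteratorSetting cfg = new IteratorSetting(40, "tsfilter", TimestampFilter.class);
        // Accept only entries whose timestamps fall in this GMT window (inclusive by default).
        TimestampFilter.setRange(cfg, "20150101000000GMT", "20150131235959GMT");
        scanner.addScanIterator(cfg);
      }
    }
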
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
index 6b51ac5..9d6c290 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/TransformingIterator.java
@@ -629,7 +629,7 @@ abstract public class TransformingIterator extends WrappingIterator implements O
    * @return the part of the key this iterator is not transforming
    */
   abstract protected PartialKey getKeyPrefix();
-  
+
   public interface KVBuffer {
     void append(Key key, Value val);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
index 2fc3a27..88ba20d 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/VersioningIterator.java
@@ -35,15 +35,15 @@ import org.apache.accumulo.core.iterators.WrappingIterator;
 
 public class VersioningIterator extends WrappingIterator implements OptionDescriber {
   private final int maxCount = 10;
-  
+
   private Key currentKey = new Key();
   private int numVersions;
   protected int maxVersions;
-  
+
   private Range range;
   private Collection<ByteSequence> columnFamilies;
   private boolean inclusive;
-  
+
   @Override
   public VersioningIterator deepCopy(IteratorEnvironment env) {
     VersioningIterator copy = new VersioningIterator();
@@ -51,7 +51,7 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
     copy.maxVersions = maxVersions;
     return copy;
   }
-  
+
   @Override
   public void next() throws IOException {
     if (numVersions >= maxVersions) {
@@ -59,7 +59,7 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
       resetVersionCount();
       return;
     }
-    
+
     super.next();
     if (getSource().hasTop()) {
       if (getSource().getTopKey().equals(currentKey, PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
@@ -69,7 +69,7 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
       }
     }
   }
-  
+
   @Override
   public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
     // do not want to seek to the middle of a row
@@ -77,25 +77,25 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
     this.range = seekRange;
     this.columnFamilies = columnFamilies;
     this.inclusive = inclusive;
-    
+
     super.seek(seekRange, columnFamilies, inclusive);
     resetVersionCount();
-    
+
     if (range.getStartKey() != null)
       while (hasTop() && range.beforeStartKey(getTopKey()))
         next();
   }
-  
+
   private void resetVersionCount() {
     if (super.hasTop())
       currentKey.set(getSource().getTopKey());
     numVersions = 1;
   }
-  
+
   private void skipRowColumn() throws IOException {
     Key keyToSkip = currentKey;
     super.next();
-    
+
     int count = 0;
     while (getSource().hasTop() && getSource().getTopKey().equals(keyToSkip, PartialKey.ROW_COLFAM_COLQUAL_COLVIS)) {
       if (count < maxCount) {
@@ -109,7 +109,7 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
       }
     }
   }
-  
+
   protected void reseek(Key key) throws IOException {
     if (key == null)
       return;
@@ -121,30 +121,30 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
       getSource().seek(range, columnFamilies, inclusive);
     }
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
     this.numVersions = 0;
-    
+
     String maxVerString = options.get("maxVersions");
     if (maxVerString != null)
       this.maxVersions = Integer.parseInt(maxVerString);
     else
       this.maxVersions = 1;
-    
+
     if (maxVersions < 1)
       throw new IllegalArgumentException("maxVersions for versioning iterator must be >= 1");
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     return new IteratorOptions("vers", "The VersioningIterator keeps a fixed number of versions for each key", Collections.singletonMap("maxVersions",
         "number of versions to keep for a particular key (with differing timestamps)"), null);
   }
-  
+
   private static final String MAXVERSIONS_OPT = "maxVersions";
-  
+
   @Override
   public boolean validateOptions(Map<String,String> options) {
     int i;
@@ -157,7 +157,7 @@ public class VersioningIterator extends WrappingIterator implements OptionDescri
       throw new IllegalArgumentException(MAXVERSIONS_OPT + " for versioning iterator must be >= 1");
     return true;
   }
-  
+
   /**
    * Encode the maximum number of versions to return onto the ScanIterator
    */

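[Editor's note, not part of the commit: the VersioningIterator changes are formatting-only. The "maxVersions" option read in init() above can be set directly on a scan-time IteratorSetting; a minimal sketch, with an arbitrary priority.]

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.VersioningIterator;

    public class VersioningExample {
      static void keepThreeVersions(Scanner scanner) {
        IteratorSetting cfg = new IteratorSetting(90, "vers", VersioningIterator.class);
        // "maxVersions" is the option name parsed in init(); keep 3 timestamps per key.
        cfg.addOption("maxVersions", "3");
        scanner.addScanIterator(cfg);
      }
    }
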
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
index 878aa8e..6e55aec 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/VisibilityFilter.java
@@ -35,28 +35,28 @@ import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.io.Text;
 
 /**
- * 
+ *
  */
 public class VisibilityFilter extends org.apache.accumulo.core.iterators.system.VisibilityFilter implements OptionDescriber {
-  
+
   private static final String AUTHS = "auths";
   private static final String FILTER_INVALID_ONLY = "filterInvalid";
-  
+
   private boolean filterInvalid;
-  
+
   /**
-   * 
+   *
    */
   public VisibilityFilter() {
     super();
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
     validateOptions(options);
     this.filterInvalid = Boolean.parseBoolean(options.get(FILTER_INVALID_ONLY));
-    
+
     if (!filterInvalid) {
       String auths = options.get(AUTHS);
       Authorizations authObj = auths == null || auths.isEmpty() ? new Authorizations() : new Authorizations(auths.getBytes(UTF_8));
@@ -66,7 +66,7 @@ public class VisibilityFilter extends org.apache.accumulo.core.iterators.system.
     this.cache = new LRUMap(1000);
     this.tmpVis = new Text();
   }
-  
+
   @Override
   public boolean accept(Key k, Value v) {
     if (filterInvalid) {
@@ -86,7 +86,7 @@ public class VisibilityFilter extends org.apache.accumulo.core.iterators.system.
       return super.accept(k, v);
     }
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     IteratorOptions io = super.describeOptions();
@@ -97,13 +97,13 @@ public class VisibilityFilter extends org.apache.accumulo.core.iterators.system.
     io.addNamedOption(AUTHS, "the serialized set of authorizations to filter against (default: empty string, accepts only entries visible by all)");
     return io;
   }
-  
+
   public static void setAuthorizations(IteratorSetting setting, Authorizations auths) {
     setting.addOption(AUTHS, auths.serialize());
   }
-  
+
   public static void filterInvalidLabelsOnly(IteratorSetting setting, boolean featureEnabled) {
     setting.addOption(FILTER_INVALID_ONLY, Boolean.toString(featureEnabled));
   }
-  
+
 }

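[Editor's note, not part of the commit: the VisibilityFilter changes above are also whitespace-only. The two static helpers it keeps, setAuthorizations and filterInvalidLabelsOnly, are typically used together as in this sketch.]

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.VisibilityFilter;
    import org.apache.accumulo.core.security.Authorizations;

    public class VisibilityFilterExample {
      static void dropEntriesNotVisibleTo(Scanner scanner, Authorizations auths) {
        IteratorSetting cfg = new IteratorSetting(45, "visfilter", VisibilityFilter.class);
        // Filter against these authorizations rather than only dropping invalid labels.
        VisibilityFilter.setAuthorizations(cfg, auths);
        VisibilityFilter.filterInvalidLabelsOnly(cfg, false);
        scanner.addScanIterator(cfg);
      }
    }
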
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java
index 037f9a5..25f30a8 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeColumnFamilyIterator.java
@@ -39,31 +39,31 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.hadoop.io.Text;
 
 /**
- * 
+ *
  * The WholeColumnFamilyIterator is designed to provide row/cf-isolation so that queries see mutations as atomic. It does so by grouping row/Column family (as
  * key) and rest of data as Value into a single key/value pair, which is returned through the client as an atomic operation.
- * 
+ *
  * To regain the original key/value pairs of the row, call the decodeRow function on the key/value pair that this iterator returned.
- * 
+ *
  * @since 1.6.0
  */
 public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Value>, OptionDescriber {
-  
+
   private SortedKeyValueIterator<Key,Value> sourceIter;
   private Key topKey = null;
   private Value topValue = null;
-  
+
   public WholeColumnFamilyIterator() {
 
   }
-  
+
   WholeColumnFamilyIterator(SortedKeyValueIterator<Key,Value> source) {
     this.sourceIter = source;
   }
-  
+
   /**
    * Decode whole row/cf out of value. decode key value pairs that have been encoded into a single // value
-   * 
+   *
    * @param rowKey
    *          the row key to decode
    * @param rowValue
@@ -105,11 +105,11 @@ public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Val
     }
     return map;
   }
-  
+
   /**
    * Encode row/cf. Take a stream of keys and values and output a value that encodes everything but their row and column families keys and values must be paired
    * one for one
-   * 
+   *
    * @param keys
    *          the row keys to encode into value
    * @param values
@@ -144,25 +144,25 @@ public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Val
       dout.writeInt(valBytes.length);
       dout.write(valBytes);
     }
-    
+
     return new Value(out.toByteArray());
   }
-  
+
   List<Key> keys = new ArrayList<Key>();
   List<Value> values = new ArrayList<Value>();
-  
+
   private void prepKeys() throws IOException {
     if (topKey != null)
       return;
     Text currentRow;
     Text currentCf;
-    
+
     do {
       if (sourceIter.hasTop() == false)
         return;
       currentRow = new Text(sourceIter.getTopKey().getRow());
       currentCf = new Text(sourceIter.getTopKey().getColumnFamily());
-      
+
       keys.clear();
       values.clear();
       while (sourceIter.hasTop() && sourceIter.getTopKey().getRow().equals(currentRow) && sourceIter.getTopKey().getColumnFamily().equals(currentCf)) {
@@ -171,14 +171,14 @@ public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Val
         sourceIter.next();
       }
     } while (!filter(currentRow, keys, values));
-    
+
     topKey = new Key(currentRow, currentCf);
     topValue = encodeColumnFamily(keys, values);
-    
+
   }
-  
+
   /**
-   * 
+   *
    * @param currentRow
    *          All keys & cf have this in their row portion (do not modify!).
    * @param keys
@@ -190,48 +190,48 @@ public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Val
   protected boolean filter(Text currentRow, List<Key> keys, List<Value> values) {
     return true;
   }
-  
+
   @Override
   public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
     if (sourceIter != null)
       return new WholeColumnFamilyIterator(sourceIter.deepCopy(env));
     return new WholeColumnFamilyIterator();
   }
-  
+
   @Override
   public Key getTopKey() {
     return topKey;
   }
-  
+
   @Override
   public Value getTopValue() {
     return topValue;
   }
-  
+
   @Override
   public boolean hasTop() {
     return topKey != null || sourceIter.hasTop();
   }
-  
+
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     sourceIter = source;
   }
-  
+
   @Override
   public void next() throws IOException {
     topKey = null;
     topValue = null;
     prepKeys();
   }
-  
+
   @Override
   public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
     topKey = null;
     topValue = null;
-    
+
     Key sk = range.getStartKey();
-    
+
     if (sk != null && sk.getColumnQualifierData().length() == 0 && sk.getColumnVisibilityData().length() == 0 && sk.getTimestamp() == Long.MAX_VALUE
         && !range.isStartKeyInclusive()) {
       // assuming that we are seeking using a key previously returned by
@@ -240,22 +240,22 @@ public class WholeColumnFamilyIterator implements SortedKeyValueIterator<Key,Val
       Key followingRowKey = sk.followingKey(PartialKey.ROW_COLFAM);
       if (range.getEndKey() != null && followingRowKey.compareTo(range.getEndKey()) > 0)
         return;
-      
+
       range = new Range(sk.followingKey(PartialKey.ROW_COLFAM), true, range.getEndKey(), range.isEndKeyInclusive());
     }
-    
+
     sourceIter.seek(range, columnFamilies, inclusive);
     prepKeys();
   }
-  
+
   @Override
   public IteratorOptions describeOptions() {
     return new IteratorOptions("wholecolumnfamilyiterator", "WholeColumnFamilyIterator. Group equal row & column family into single row entry.", null, null);
   }
-  
+
   @Override
   public boolean validateOptions(Map<String,String> options) {
     return true;
   }
-  
+
 }

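[Editor's note, not part of the commit: the WholeColumnFamilyIterator hunks are whitespace-only. Attaching it at scan time, so each returned entry packs one row/column-family group as the javadoc describes, is a one-liner; the priority and name here are illustrative.]

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.WholeColumnFamilyIterator;

    public class WholeCfExample {
      static void groupByRowAndFamily(Scanner scanner) {
        // Each entry returned by the scanner now encodes an entire row/column-family group.
        scanner.addScanIterator(new IteratorSetting(50, "wholecf", WholeColumnFamilyIterator.class));
      }
    }
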
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeRowIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeRowIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeRowIterator.java
index 4b7802d..7c47ec3 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeRowIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/WholeRowIterator.java
@@ -32,26 +32,26 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
 /**
- * 
+ *
  * The WholeRowIterator is designed to provide row-isolation so that queries see mutations as atomic. It does so by encapsulating an entire row of key/value
  * pairs into a single key/value pair, which is returned through the client as an atomic operation.
- * 
+ *
  * <p>
  * This iterator extends the {@link RowEncodingIterator}, providing implementations for rowEncoder and rowDecoder which serializes all column and value
  * information from a given row into a single ByteStream in a value.
- * 
+ *
  * <p>
  * As with the RowEncodingIterator, when seeking in the WholeRowIterator using a range that starts at a non-inclusive first key in a row, this iterator will
  * skip to the next row.
- * 
+ *
  * <p>
  * To regain the original key/value pairs of the row, call the decodeRow function on the key/value pair that this iterator returned.
- * 
+ *
  * @see RowFilter
  */
 public class WholeRowIterator extends RowEncodingIterator {
   public WholeRowIterator() {}
-  
+
   WholeRowIterator(SortedKeyValueIterator<Key,Value> source) {
     this.sourceIter = source;
   }
@@ -74,8 +74,8 @@ public class WholeRowIterator extends RowEncodingIterator {
   }
 
   /**
-   * Returns the byte array containing the field of row key from the given DataInputStream din.
-   * Assumes that din first has the length of the field, followed by the field itself.
+   * Returns the byte array containing the field of row key from the given DataInputStream din. Assumes that din first has the length of the field, followed by
+   * the field itself.
    */
   private static byte[] readField(DataInputStream din) throws IOException {
     int len = din.readInt();
@@ -85,8 +85,7 @@ public class WholeRowIterator extends RowEncodingIterator {
     // We ignore the zero length case because DataInputStream.read can return -1
     // if zero length was expected and end of stream has been reached.
     if (len > 0 && len != readLen) {
-      throw new IOException(String.format("Expected to read %d bytes but read %d",
-          len, readLen));
+      throw new IOException(String.format("Expected to read %d bytes but read %d", len, readLen));
     }
     return b;
   }
@@ -107,7 +106,7 @@ public class WholeRowIterator extends RowEncodingIterator {
     }
     return map;
   }
-  
+
   // take a stream of keys and values and output a value that encodes everything but their row
   // keys and values must be paired one for one
   public static final Value encodeRow(List<Key> keys, List<Value> values) throws IOException {
@@ -142,7 +141,7 @@ public class WholeRowIterator extends RowEncodingIterator {
       dout.writeInt(valBytes.length);
       dout.write(valBytes);
     }
-    
+
     return new Value(out.toByteArray());
   }
 }

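[Editor's note, not part of the commit: the WholeRowIterator changes above are formatting and javadoc reflow. A minimal sketch of the usual scan-side pattern follows, attaching the iterator and unpacking each encoded row with decodeRow; the iterator name and priority are illustrative.]

    import java.io.IOException;
    import java.util.Map.Entry;
    import java.util.SortedMap;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.WholeRowIterator;

    public class WholeRowExample {
      static void scanWholeRows(Scanner scanner) throws IOException {
        scanner.addScanIterator(new IteratorSetting(50, "wholerow", WholeRowIterator.class));
        for (Entry<Key,Value> rowEntry : scanner) {
          // Each entry is an entire encoded row; unpack it back into its original key/values.
          SortedMap<Key,Value> row = WholeRowIterator.decodeRow(rowEntry.getKey(), rowEntry.getValue());
          for (Entry<Key,Value> e : row.entrySet())
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }
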
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/master/state/tables/TableState.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/master/state/tables/TableState.java b/core/src/main/java/org/apache/accumulo/core/master/state/tables/TableState.java
index 8cac10c..cbf6f4c 100644
--- a/core/src/main/java/org/apache/accumulo/core/master/state/tables/TableState.java
+++ b/core/src/main/java/org/apache/accumulo/core/master/state/tables/TableState.java
@@ -19,16 +19,16 @@ package org.apache.accumulo.core.master.state.tables;
 public enum TableState {
   // NEW while making directories and tablets;
   NEW,
-  
+
   // ONLINE tablets will be assigned
   ONLINE,
-  
+
   // OFFLINE tablets will be taken offline
   OFFLINE,
-  
+
   // DELETING waiting for tablets to go offline and table will be removed
   DELETING,
-  
+
   // UNKNOWN is NOT a valid state; it is reserved for unrecognized serialized
   // representations of table state
   UNKNOWN;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
index b40a6bd..224aafd 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
@@ -62,30 +62,30 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
   private static final Logger log = Logger.getLogger(MetadataLocationObtainer.class);
   private SortedSet<Column> locCols;
   private ArrayList<Column> columns;
-  
+
   public MetadataLocationObtainer() {
-    
+
     locCols = new TreeSet<Column>();
     locCols.add(new Column(TextUtil.getBytes(TabletsSection.CurrentLocationColumnFamily.NAME), null, null));
     locCols.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.toColumn());
     columns = new ArrayList<Column>(locCols);
   }
-  
+
   @Override
   public TabletLocations lookupTablet(ClientContext context, TabletLocation src, Text row, Text stopRow, TabletLocator parent)
       throws AccumuloSecurityException, AccumuloException {
-    
+
     try {
       OpTimer opTimer = null;
       if (log.isTraceEnabled())
         opTimer = new OpTimer(log, Level.TRACE).start("Looking up in " + src.tablet_extent.getTableId() + " row=" + TextUtil.truncate(row) + "  extent="
             + src.tablet_extent + " tserver=" + src.tablet_location);
-      
+
       Range range = new Range(row, true, stopRow, true);
-      
+
       TreeMap<Key,Value> encodedResults = new TreeMap<Key,Value>();
       TreeMap<Key,Value> results = new TreeMap<Key,Value>();
-      
+
       // Use the whole row iterator so that a partial mutations is not read. The code that extracts locations for tablets does a sanity check to ensure there is
       // only one location. Reading a partial mutation could make it appear there are multiple locations when there are not.
       List<IterInfo> serverSideIteratorList = new ArrayList<IterInfo>();
@@ -93,25 +93,25 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
       Map<String,Map<String,String>> serverSideIteratorOptions = Collections.emptyMap();
       boolean more = ThriftScanner.getBatchFromServer(context, range, src.tablet_extent, src.tablet_location, encodedResults, locCols, serverSideIteratorList,
           serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false);
-      
+
       decodeRows(encodedResults, results);
-      
+
       if (more && results.size() == 1) {
         range = new Range(results.lastKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME), true, new Key(stopRow).followingKey(PartialKey.ROW), false);
         encodedResults.clear();
         more = ThriftScanner.getBatchFromServer(context, range, src.tablet_extent, src.tablet_location, encodedResults, locCols, serverSideIteratorList,
             serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false);
-        
+
         decodeRows(encodedResults, results);
       }
-      
+
       if (opTimer != null)
         opTimer.stop("Got " + results.size() + " results  from " + src.tablet_extent + " in %DURATION%");
-      
-      //if (log.isTraceEnabled()) log.trace("results "+results);
-      
+
+      // if (log.isTraceEnabled()) log.trace("results "+results);
+
       return MetadataLocationObtainer.getMetadataLocationEntries(results);
-      
+
     } catch (AccumuloServerException ase) {
       if (log.isTraceEnabled())
         log.trace(src.tablet_extent.getTableId() + " lookup failed, " + src.tablet_location + " server side exception");
@@ -125,10 +125,10 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
         log.trace(src.tablet_extent.getTableId() + " lookup failed", e);
       parent.invalidateCache(context.getInstance(), src.tablet_location);
     }
-    
+
     return null;
   }
-  
+
   private void decodeRows(TreeMap<Key,Value> encodedResults, TreeMap<Key,Value> results) throws AccumuloException {
     for (Entry<Key,Value> entry : encodedResults.entrySet()) {
       try {
@@ -138,15 +138,15 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
       }
     }
   }
-  
+
   @Override
   public List<TabletLocation> lookupTablets(ClientContext context, String tserver, Map<KeyExtent,List<Range>> tabletsRanges, TabletLocator parent)
       throws AccumuloSecurityException, AccumuloException {
-    
+
     final TreeMap<Key,Value> results = new TreeMap<Key,Value>();
 
     ResultReceiver rr = new ResultReceiver() {
-      
+
       @Override
       public void receive(List<Entry<Key,Value>> entries) {
         for (Entry<Key,Value> entry : entries) {
@@ -158,7 +158,7 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
         }
       }
     };
-    
+
     ScannerOptions opts = new ScannerOptions() {
       ScannerOptions setOpts() {
         this.fetchedColumns = locCols;
@@ -168,7 +168,7 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
         return this;
       }
     }.setOpts();
-    
+
     Map<KeyExtent,List<Range>> unscanned = new HashMap<KeyExtent,List<Range>>();
     Map<KeyExtent,List<Range>> failures = new HashMap<KeyExtent,List<Range>>();
     try {
@@ -186,10 +186,10 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
       log.trace("lookupTablets failed server=" + tserver, e);
       throw e;
     }
-    
+
     return MetadataLocationObtainer.getMetadataLocationEntries(results).getLocations();
   }
-  
+
   public static TabletLocations getMetadataLocationEntries(SortedMap<Key,Value> entries) {
     Key key;
     Value val;
@@ -197,30 +197,30 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
     Text session = null;
     Value prevRow = null;
     KeyExtent ke;
-    
+
     List<TabletLocation> results = new ArrayList<TabletLocation>();
     ArrayList<KeyExtent> locationless = new ArrayList<KeyExtent>();
-    
+
     Text lastRowFromKey = new Text();
-    
+
     // text obj below is meant to be reused in loop for efficiency
     Text colf = new Text();
     Text colq = new Text();
-    
+
     for (Entry<Key,Value> entry : entries.entrySet()) {
       key = entry.getKey();
       val = entry.getValue();
-      
+
       if (key.compareRow(lastRowFromKey) != 0) {
         prevRow = null;
         location = null;
         session = null;
         key.getRow(lastRowFromKey);
       }
-      
+
       colf = key.getColumnFamily(colf);
       colq = key.getColumnQualifier(colq);
-      
+
       // interpret the row id as a key extent
       if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME) || colf.equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
         if (location != null) {
@@ -231,19 +231,19 @@ public class MetadataLocationObtainer implements TabletLocationObtainer {
       } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
         prevRow = new Value(val);
       }
-      
+
       if (prevRow != null) {
         ke = new KeyExtent(key.getRow(), prevRow);
         if (location != null)
           results.add(new TabletLocation(ke, location.toString(), session.toString()));
         else
           locationless.add(ke);
-        
+
         location = null;
         prevRow = null;
       }
     }
-    
+
     return new TabletLocations(results, locationless);
   }
 }

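The lookup above deliberately scans with a server-side whole-row iterator so that a partially written mutation is never observed; each returned Value therefore packs an entire metadata row. A hedged sketch of how such an entry is unpacked on the client, assuming the standard WholeRowIterator encoding (this is not the decodeRows body, which is elided from the hunk):

  import org.apache.accumulo.core.iterators.user.WholeRowIterator;

  private static void decodeInto(SortedMap<Key,Value> encoded, SortedMap<Key,Value> out) throws IOException {
    for (Map.Entry<Key,Value> e : encoded.entrySet()) {
      // each value holds a whole row; expand it back into its individual cells
      out.putAll(WholeRowIterator.decodeRow(e.getKey(), e.getValue()));
    }
  }
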
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
index 7d9592b..3b443f4 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
@@ -30,12 +30,12 @@ import org.apache.accumulo.core.data.KeyExtent;
  * Provides a consolidated API for handling table metadata
  */
 public abstract class MetadataServicer {
-  
+
   public static MetadataServicer forTableName(ClientContext context, String tableName) throws AccumuloException, AccumuloSecurityException {
     checkArgument(tableName != null, "tableName is null");
     return forTableId(context, context.getConnector().tableOperations().tableIdMap().get(tableName));
   }
-  
+
   public static MetadataServicer forTableId(ClientContext context, String tableId) {
     checkArgument(tableId != null, "tableId is null");
     if (RootTable.ID.equals(tableId))
@@ -45,19 +45,19 @@ public abstract class MetadataServicer {
     else
       return new ServicerForUserTables(context, tableId);
   }
-  
+
   /**
-   * 
+   *
    * @return the table id of the table currently being serviced
    */
   public abstract String getServicedTableId();
-  
+
   /**
    * Populate the provided data structure with the known tablets for the table being serviced
-   * 
+   *
    * @param tablets
    *          A mapping of all known tablets to their location (if available, null otherwise)
    */
   public abstract void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException;
-  
+
 }

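The two factory methods above select the correct servicer for a table. A minimal usage sketch, assuming an already-built ClientContext named context and a user table named "mytable":

  SortedMap<KeyExtent,String> tablets = new TreeMap<KeyExtent,String>();
  MetadataServicer servicer = MetadataServicer.forTableName(context, "mytable");
  servicer.getTabletLocations(tablets);
  for (Map.Entry<KeyExtent,String> e : tablets.entrySet()) {
    // the value is the tablet's current location, or null if it is unassigned
    System.out.println(e.getKey() + " -> " + e.getValue());
  }
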
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
index 85219eb..24148b1 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
@@ -21,7 +21,7 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.hadoop.io.Text;
 
 /**
- * 
+ *
  */
 public class RootTable {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
index 29f7027..525e2a2 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
@@ -23,9 +23,9 @@ import org.apache.accumulo.core.client.impl.ClientContext;
  * The metadata table's metadata is serviced in the root table.
  */
 class ServicerForMetadataTable extends TableMetadataServicer {
-  
+
   public ServicerForMetadataTable(ClientContext context) {
     super(context, RootTable.NAME, MetadataTable.ID);
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
index b13149e..d02242c 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
@@ -30,18 +30,18 @@ import org.apache.accumulo.core.data.KeyExtent;
  * The root table's metadata is serviced in zookeeper.
  */
 class ServicerForRootTable extends MetadataServicer {
-  
+
   private final Instance instance;
-  
+
   public ServicerForRootTable(ClientContext context) {
     this.instance = context.getInstance();
   }
-  
+
   @Override
   public String getServicedTableId() {
     return RootTable.ID;
   }
-  
+
   @Override
   public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     tablets.put(RootTable.EXTENT, instance.getRootTabletLocation());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
index c9e2ede..5efa8a6 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
@@ -23,9 +23,9 @@ import org.apache.accumulo.core.client.impl.ClientContext;
  * Metadata for user tables are serviced in the metadata table.
  */
 class ServicerForUserTables extends TableMetadataServicer {
-  
+
   public ServicerForUserTables(ClientContext context, String tableId) {
     super(context, MetadataTable.NAME, tableId);
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
index fbba279..7e2ae0a 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
@@ -37,40 +37,40 @@ import org.apache.hadoop.io.Text;
  * A {@link MetadataServicer} that is backed by a table
  */
 abstract class TableMetadataServicer extends MetadataServicer {
-  
+
   private final ClientContext context;
   private String tableIdBeingServiced;
   private String serviceTableName;
-  
+
   public TableMetadataServicer(ClientContext context, String serviceTableName, String tableIdBeingServiced) {
     this.context = context;
     this.serviceTableName = serviceTableName;
     this.tableIdBeingServiced = tableIdBeingServiced;
   }
-  
+
   @Override
   public String getServicedTableId() {
     return tableIdBeingServiced;
   }
-  
+
   public String getServicingTableName() {
     return serviceTableName;
   }
-  
+
   @Override
   public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    
+
     Scanner scanner = context.getConnector().createScanner(getServicingTableName(), Authorizations.EMPTY);
-    
+
     TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
-    
+
     // position at first entry in metadata table for given table
     scanner.setRange(TabletsSection.getRange(getServicedTableId()));
-    
+
     Text colf = new Text();
     Text colq = new Text();
-    
+
     KeyExtent currentKeyExtent = null;
     String location = null;
     Text row = null;
@@ -85,10 +85,10 @@ abstract class TableMetadataServicer extends MetadataServicer {
       } else {
         row = entry.getKey().getRow();
       }
-      
+
       colf = entry.getKey().getColumnFamily(colf);
       colq = entry.getKey().getColumnQualifier(colq);
-      
+
       if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
         currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
         tablets.put(currentKeyExtent, location);
@@ -97,42 +97,42 @@ abstract class TableMetadataServicer extends MetadataServicer {
       } else if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
         location = entry.getValue().toString();
       }
-      
+
     }
-    
+
     validateEntries(tablets);
   }
-  
+
   private void validateEntries(SortedMap<KeyExtent,String> tablets) throws AccumuloException {
     SortedSet<KeyExtent> tabletsKeys = (SortedSet<KeyExtent>) tablets.keySet();
     // sanity check of metadata table entries
     // make sure tablets has no holes, and that it starts and ends w/ null
     if (tabletsKeys.size() == 0)
       throw new AccumuloException("No entries found in metadata table for table " + getServicedTableId());
-    
+
     if (tabletsKeys.first().getPrevEndRow() != null)
       throw new AccumuloException("Problem with metadata table, first entry for table " + getServicedTableId() + "- " + tabletsKeys.first()
           + " - has non null prev end row");
-    
+
     if (tabletsKeys.last().getEndRow() != null)
       throw new AccumuloException("Problem with metadata table, last entry for table " + getServicedTableId() + "- " + tabletsKeys.first()
           + " - has non null end row");
-    
+
     Iterator<KeyExtent> tabIter = tabletsKeys.iterator();
     Text lastEndRow = tabIter.next().getEndRow();
     while (tabIter.hasNext()) {
       KeyExtent tabke = tabIter.next();
-      
+
       if (tabke.getPrevEndRow() == null)
         throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
-      
+
       if (!tabke.getPrevEndRow().equals(lastEndRow))
         throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
-      
+
       lastEndRow = tabke.getEndRow();
     }
-    
+
     // end METADATA table sanity check
   }
-  
+
 }

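validateEntries above requires the tablets of a table to form one contiguous chain: the first prev end row and the last end row are null, and each tablet's prev end row equals the previous tablet's end row. A hedged restatement of that invariant as a standalone check (not the class's own code), using only the KeyExtent accessors already seen in the hunk:

  static boolean isContiguous(SortedSet<KeyExtent> extents) {
    if (extents.isEmpty())
      return false; // validateEntries treats an empty metadata range as an error
    Text lastEndRow = null;
    for (KeyExtent ke : extents) {
      Text prev = ke.getPrevEndRow();
      if ((prev == null) != (lastEndRow == null) || (prev != null && !prev.equals(lastEndRow)))
        return false; // a hole (or overlap) between adjacent tablets
      lastEndRow = ke.getEndRow();
    }
    return lastEndRow == null; // the last tablet must have a null end row
  }
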
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
index d3323a4..cebe041 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
@@ -20,74 +20,74 @@ public class DataFileValue {
   private long size;
   private long numEntries;
   private long time = -1;
-  
+
   public DataFileValue(long size, long numEntries, long time) {
     this.size = size;
     this.numEntries = numEntries;
     this.time = time;
   }
-  
+
   public DataFileValue(long size, long numEntries) {
     this.size = size;
     this.numEntries = numEntries;
     this.time = -1;
   }
-  
+
   public DataFileValue(byte[] encodedDFV) {
     String[] ba = new String(encodedDFV).split(",");
-    
+
     size = Long.parseLong(ba[0]);
     numEntries = Long.parseLong(ba[1]);
-    
+
     if (ba.length == 3)
       time = Long.parseLong(ba[2]);
     else
       time = -1;
   }
-  
+
   public long getSize() {
     return size;
   }
-  
+
   public long getNumEntries() {
     return numEntries;
   }
-  
+
   public boolean isTimeSet() {
     return time >= 0;
   }
-  
+
   public long getTime() {
     return time;
   }
-  
+
   public byte[] encode() {
     if (time >= 0)
       return ("" + size + "," + numEntries + "," + time).getBytes();
     return ("" + size + "," + numEntries).getBytes();
   }
-  
+
   @Override
   public boolean equals(Object o) {
     if (o instanceof DataFileValue) {
       DataFileValue odfv = (DataFileValue) o;
-      
+
       return size == odfv.size && numEntries == odfv.numEntries;
     }
-    
+
     return false;
   }
-  
+
   @Override
   public int hashCode() {
     return Long.valueOf(size + numEntries).hashCode();
   }
-  
+
   @Override
   public String toString() {
     return size + " " + numEntries;
   }
-  
+
   public void setTime(long time) {
     if (time < 0)
       throw new IllegalArgumentException();

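The encoding used above is just a comma-separated "size,numEntries[,time]" string. A quick round-trip sketch using only the constructors and accessors shown in this hunk:

  DataFileValue dfv = new DataFileValue(1024L, 50L, 42L);
  byte[] encoded = dfv.encode();                  // the bytes of "1024,50,42"
  DataFileValue decoded = new DataFileValue(encoded);
  assert decoded.getSize() == 1024L;
  assert decoded.getNumEntries() == 50L;
  assert decoded.isTimeSet() && decoded.getTime() == 42L;
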
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
index 0d8a0dc..534dd7f 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -31,26 +31,26 @@ import com.google.common.base.Preconditions;
  * Describes the table schema used for metadata tables
  */
 public class MetadataSchema {
-  
+
   public static final String RESERVED_PREFIX = "~";
-  
+
   /**
    * Used for storing information about tablets
    */
   public static class TabletsSection {
     private static final Section section = new Section(null, false, RESERVED_PREFIX, false);
-    
+
     public static Range getRange() {
       return section.getRange();
     }
-    
+
     public static Range getRange(String tableId) {
       return new Range(new Key(tableId + ';'), true, new Key(tableId + '<').followingKey(PartialKey.ROW), false);
     }
-    
+
     public static Text getRow(Text tableId, Text endRow) {
       Text entry = new Text(tableId);
-      
+
       if (endRow == null) {
         // append delimiter for default tablet
         entry.append(new byte[] {'<'}, 0, 1);
@@ -59,10 +59,10 @@ public class MetadataSchema {
         entry.append(new byte[] {';'}, 0, 1);
         entry.append(endRow.getBytes(), 0, endRow.getLength());
       }
-      
+
       return entry;
     }
-    
+
     /**
      * Column family for storing the tablet information needed by clients
      */
@@ -86,7 +86,7 @@ public class MetadataSchema {
        */
       public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(NAME, new Text("splitRatio"));
     }
-    
+
     /**
      * Column family for recording information used by the TServer
      */
@@ -113,63 +113,63 @@ public class MetadataSchema {
        */
       public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(NAME, new Text("lock"));
     }
-    
+
     /**
      * Column family for storing entries created by the TServer to indicate it has loaded a tablet that it was assigned
      */
     public static class CurrentLocationColumnFamily {
       public static final Text NAME = new Text("loc");
     }
-    
+
     /**
      * Column family for storing the assigned location
      */
     public static class FutureLocationColumnFamily {
       public static final Text NAME = new Text("future");
     }
-    
+
     /**
      * Column family for storing last location, as a hint for assignment
      */
     public static class LastLocationColumnFamily {
       public static final Text NAME = new Text("last");
     }
-    
+
     /**
      * Temporary markers that indicate a tablet loaded a bulk file
      */
     public static class BulkFileColumnFamily {
       public static final Text NAME = new Text("loaded");
     }
-    
+
     /**
      * Temporary marker that indicates a tablet was successfully cloned
      */
     public static class ClonedColumnFamily {
       public static final Text NAME = new Text("!cloned");
     }
-    
+
     /**
      * Column family for storing files used by a tablet
      */
     public static class DataFileColumnFamily {
       public static final Text NAME = new Text("file");
     }
-    
+
     /**
      * Column family for storing the set of files scanned with an isolated scanner, to prevent them from being deleted
      */
     public static class ScanFileColumnFamily {
       public static final Text NAME = new Text("scan");
     }
-    
+
     /**
      * Column family for storing write-ahead log entries
      */
     public static class LogColumnFamily {
       public static final Text NAME = new Text("log");
     }
-    
+
     /**
      * Column family for indicating that the files in a tablet have been trimmed to only include data for the current tablet, so that they are safe to merge
      */
@@ -178,53 +178,53 @@ public class MetadataSchema {
       public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(NAME, new Text("chopped"));
     }
   }
-  
+
   /**
    * Contains additional metadata in a reserved area not for tablets
    */
   public static class ReservedSection {
     private static final Section section = new Section(RESERVED_PREFIX, true, null, false);
-    
+
     public static Range getRange() {
       return section.getRange();
     }
-    
+
     public static String getRowPrefix() {
       return section.getRowPrefix();
     }
-    
+
   }
-  
+
   /**
    * Holds delete markers for potentially unused files/directories
    */
   public static class DeletesSection {
     private static final Section section = new Section(RESERVED_PREFIX + "del", true, RESERVED_PREFIX + "dem", false);
-    
+
     public static Range getRange() {
       return section.getRange();
     }
-    
+
     public static String getRowPrefix() {
       return section.getRowPrefix();
     }
-    
+
   }
-  
+
   /**
    * Holds bulk-load-in-progress processing flags
    */
   public static class BlipSection {
     private static final Section section = new Section(RESERVED_PREFIX + "blip", true, RESERVED_PREFIX + "bliq", false);
-    
+
     public static Range getRange() {
       return section.getRange();
     }
-    
+
     public static String getRowPrefix() {
       return section.getRowPrefix();
     }
-    
+
   }
 
   /**
@@ -247,7 +247,7 @@ public class MetadataSchema {
 
     /**
      * Extract the table ID from the colfam (inefficiently if called repeatedly)
-     * 
+     *
      * @param k
      *          Key to extract from
      * @return The table ID
@@ -261,7 +261,7 @@ public class MetadataSchema {
 
     /**
      * Extract the table ID from the colfam into the given {@link Text}
-     * 
+     *
      * @param k
      *          Key to extract from
      * @param buff
@@ -276,7 +276,7 @@ public class MetadataSchema {
 
     /**
      * Extract the file name from the row suffix into the given {@link Text}
-     * 
+     *
      * @param k
      *          Key to extract from
      * @param buff

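TabletsSection.getRow and getRange above encode a tablet's metadata row as the table id followed by ';' plus the end row, or by '<' for the default (last) tablet. A short sketch, with table id "2" chosen purely for illustration:

  Text bounded = MetadataSchema.TabletsSection.getRow(new Text("2"), new Text("m")); // row "2;m"
  Text lastTablet = MetadataSchema.TabletsSection.getRow(new Text("2"), null);       // row "2<"
  Range wholeTable = MetadataSchema.TabletsSection.getRange("2");                    // covers every tablet row of table 2
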
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/protobuf/ProtobufUtil.java b/core/src/main/java/org/apache/accumulo/core/protobuf/ProtobufUtil.java
index 60eb840..e2489f9 100644
--- a/core/src/main/java/org/apache/accumulo/core/protobuf/ProtobufUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/protobuf/ProtobufUtil.java
@@ -25,7 +25,7 @@ import com.google.protobuf.TextFormat;
  * Helper methods for interacting with Protocol Buffers and Accumulo
  */
 public class ProtobufUtil {
-  private static final char LEFT_BRACKET = '[', RIGHT_BRACKET = ']'; 
+  private static final char LEFT_BRACKET = '[', RIGHT_BRACKET = ']';
 
   public static Value toValue(GeneratedMessage msg) {
     return new Value(msg.toByteArray());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java b/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
index ff89196..fccafc5 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
@@ -24,7 +24,7 @@ import org.apache.accumulo.core.replication.thrift.RemoteReplicationException;
 import org.apache.accumulo.core.replication.thrift.WalEdits;
 
 /**
- * 
+ *
  */
 public interface AccumuloReplicationReplayer {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConfigurationUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConfigurationUtil.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConfigurationUtil.java
index ae94c2a..0817d3b 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConfigurationUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConfigurationUtil.java
@@ -27,8 +27,11 @@ public class ReplicationConfigurationUtil {
 
   /**
    * Determines if the replication is enabled for the given {@link KeyExtent}
-   * @param extent The {@link KeyExtent} for the Tablet in question
-   * @param conf The {@link AccumuloConfiguration} for that Tablet (table or namespace)
+   *
+   * @param extent
+   *          The {@link KeyExtent} for the Tablet in question
+   * @param conf
+   *          The {@link AccumuloConfiguration} for that Tablet (table or namespace)
    * @return True if this extent is a candidate for replication at the given point in time.
    */
   public static boolean isEnabled(KeyExtent extent, AccumuloConfiguration conf) {

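A one-line usage sketch of the method documented above, assuming a KeyExtent named extent and that tablet's AccumuloConfiguration named tableConf are already in hand:

  boolean shouldReplicate = ReplicationConfigurationUtil.isEnabled(extent, tableConf);
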
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConstants.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConstants.java
index 3d71681..3f96eca 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConstants.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationConstants.java
@@ -17,7 +17,7 @@
 package org.apache.accumulo.core.replication;
 
 /**
- * 
+ *
  */
 public class ReplicationConstants {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
index 491fda5..ed46130 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.replication;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import java.nio.charset.CharacterCodingException;
+
 import org.apache.accumulo.core.client.ScannerBase;
 import org.apache.accumulo.core.client.lexicoder.ULongLexicoder;
 import org.apache.accumulo.core.data.ArrayByteSequence;
@@ -34,7 +35,7 @@ import org.slf4j.LoggerFactory;
 import com.google.common.base.Preconditions;
 
 /**
- * 
+ *
  */
 public class ReplicationSchema {
   private static final Logger log = LoggerFactory.getLogger(ReplicationSchema.class);
@@ -90,7 +91,7 @@ public class ReplicationSchema {
 
     /**
      * Extract the table ID from the key (inefficiently if called repeatedly)
-     * 
+     *
      * @param k
      *          Key to extract from
      * @return The table ID
@@ -104,7 +105,7 @@ public class ReplicationSchema {
 
     /**
      * Extract the table ID from the key into the given {@link Text}
-     * 
+     *
      * @param k
      *          Key to extract from
      * @param buff
@@ -119,7 +120,7 @@ public class ReplicationSchema {
 
     /**
      * Extract the file name from the row suffix into the given {@link Text}
-     * 
+     *
      * @param k
      *          Key to extract from
      * @param buff
@@ -154,12 +155,12 @@ public class ReplicationSchema {
    */
   public static class OrderSection {
     public static final Text NAME = new Text("order");
-    public static final Text ROW_SEPARATOR = new Text(new byte[]{0});
+    public static final Text ROW_SEPARATOR = new Text(new byte[] {0});
     private static final ULongLexicoder longEncoder = new ULongLexicoder();
 
     /**
      * Extract the table ID from the given key (inefficiently if called repeatedly)
-     * 
+     *
      * @param k
      *          OrderSection Key
      * @return source table id
@@ -172,7 +173,7 @@ public class ReplicationSchema {
 
     /**
      * Extract the table ID from the given key
-     * 
+     *
      * @param k
      *          OrderSection key
      * @param buff
@@ -194,7 +195,7 @@ public class ReplicationSchema {
 
     /**
      * Creates the Mutation for the Order section for the given file and time
-     * 
+     *
      * @param file
      *          Filename
      * @param timeInMillis
@@ -224,7 +225,7 @@ public class ReplicationSchema {
 
     /**
      * Add a column update to the given mutation with the provided tableId and value
-     * 
+     *
      * @param m
      *          Mutation for OrderSection
      * @param tableId

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
index 9c57ad9..eb7833e 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationTarget.java
@@ -21,6 +21,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -36,7 +37,7 @@ public class ReplicationTarget implements Writable {
   private String remoteIdentifier;
   private String sourceTableId;
 
-  public ReplicationTarget() { }
+  public ReplicationTarget() {}
 
   public ReplicationTarget(String peerName, String remoteIdentifier, String sourceTableId) {
     this.peerName = peerName;
@@ -130,7 +131,9 @@ public class ReplicationTarget implements Writable {
 
   /**
    * Deserialize a ReplicationTarget
-   * @param t Serialized copy
+   *
+   * @param t
+   *          Serialized copy
    * @return the deserialized version
    */
   public static ReplicationTarget from(Text t) {
@@ -149,7 +152,9 @@ public class ReplicationTarget implements Writable {
 
   /**
    * Deserialize a ReplicationTarget
-   * @param s Serialized copy
+   *
+   * @param s
+   *          Serialized copy
    * @return the deserialized version
    */
   public static ReplicationTarget from(String s) {
@@ -167,8 +172,9 @@ public class ReplicationTarget implements Writable {
   }
 
   /**
-   * Convenience method to serialize a ReplicationTarget to {@link Text} using the {@link Writable} methods without caring about
-   * performance penalties due to excessive object creation
+   * Convenience method to serialize a ReplicationTarget to {@link Text} using the {@link Writable} methods without caring about performance penalties due to
+   * excessive object creation
+   *
    * @return The serialized representation of the object
    */
   public Text toText() {