Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/01/09 03:44:22 UTC

[18/66] [abbrv] accumulo git commit: ACCUMULO-3451 Format master branch (1.7.0-SNAPSHOT)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index 103ba05..58fa9d3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -59,34 +59,34 @@ import org.apache.log4j.Logger;
 import com.google.common.base.Optional;
 
 public class FileUtil {
-  
+
   public static class FileInfo {
     Key firstKey = new Key();
     Key lastKey = new Key();
-    
+
     public FileInfo(Key firstKey, Key lastKey) {
       this.firstKey = firstKey;
       this.lastKey = lastKey;
     }
-    
+
     public Text getFirstRow() {
       return firstKey.getRow();
     }
-    
+
     public Text getLastRow() {
       return lastKey.getRow();
     }
   }
-  
+
   private static final Logger log = Logger.getLogger(FileUtil.class);
-  
+
   private static Path createTmpDir(AccumuloConfiguration acuConf, VolumeManager fs) throws IOException {
-    String accumuloDir = fs.choose(Optional.<String>absent(), ServerConstants.getBaseUris());
+    String accumuloDir = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris());
 
     Path result = null;
     while (result == null) {
       result = new Path(accumuloDir + Path.SEPARATOR + "tmp/idxReduce_" + String.format("%09d", new Random().nextInt(Integer.MAX_VALUE)));
-      
+
       try {
         fs.getFileStatus(result);
         result = null;
@@ -94,9 +94,9 @@ public class FileUtil {
       } catch (FileNotFoundException fne) {
         // found an unused temp directory
       }
-      
+
       fs.mkdirs(result);
-      
+
       // try to reserve the tmp dir
       // In some versions of hadoop, two clients concurrently trying to create the same directory might both return true
       // Creating a file is not subject to this, so create a special file to make sure we solely will use this directory
@@ -105,36 +105,36 @@ public class FileUtil {
     }
     return result;
   }
-  
+
   public static Collection<String> reduceFiles(AccumuloConfiguration acuConf, Configuration conf, VolumeManager fs, Text prevEndRow, Text endRow,
       Collection<String> mapFiles, int maxFiles, Path tmpDir, int pass) throws IOException {
     ArrayList<String> paths = new ArrayList<String>(mapFiles);
-    
+
     if (paths.size() <= maxFiles)
       return paths;
-    
+
     String newDir = String.format("%s/pass_%04d", tmpDir, pass);
-    
+
     int start = 0;
-    
+
     ArrayList<String> outFiles = new ArrayList<String>();
-    
+
     int count = 0;
-    
+
     while (start < paths.size()) {
       int end = Math.min(maxFiles + start, paths.size());
       List<String> inFiles = paths.subList(start, end);
-      
+
       start = end;
-      
+
       String newMapFile = String.format("%s/%04d.%s", newDir, count++, RFile.EXTENSION);
-      
+
       outFiles.add(newMapFile);
       FileSystem ns = fs.getVolumeByPath(new Path(newMapFile)).getFileSystem();
       FileSKVWriter writer = new RFileOperations().openWriter(newMapFile.toString(), ns, ns.getConf(), acuConf);
       writer.startDefaultLocalityGroup();
       List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(inFiles.size());
-      
+
       FileSKVIterator reader = null;
       try {
         for (String s : inFiles) {
@@ -142,21 +142,21 @@ public class FileUtil {
           reader = FileOperations.getInstance().openIndex(s, ns, ns.getConf(), acuConf);
           iters.add(reader);
         }
-        
+
         MultiIterator mmfi = new MultiIterator(iters, true);
-        
+
         while (mmfi.hasTop()) {
           Key key = mmfi.getTopKey();
-          
+
           boolean gtPrevEndRow = prevEndRow == null || key.compareRow(prevEndRow) > 0;
           boolean lteEndRow = endRow == null || key.compareRow(endRow) <= 0;
-          
+
           if (gtPrevEndRow && lteEndRow)
             writer.append(key, new Value(new byte[0]));
-          
+
           if (!lteEndRow)
             break;
-          
+
           mmfi.next();
         }
       } finally {
@@ -166,7 +166,7 @@ public class FileUtil {
         } catch (IOException e) {
           log.error(e, e);
         }
-        
+
         for (SortedKeyValueIterator<Key,Value> r : iters)
           try {
             if (r != null)
@@ -175,16 +175,16 @@ public class FileUtil {
             // continue closing
             log.error(e, e);
           }
-        
+
         try {
-            writer.close();
+          writer.close();
         } catch (IOException e) {
           log.error(e, e);
           throw e;
         }
       }
     }
-    
+
     return reduceFiles(acuConf, conf, fs, prevEndRow, endRow, outFiles, maxFiles, tmpDir, pass + 1);
   }
 
@@ -192,114 +192,114 @@ public class FileUtil {
       double minSplit) throws IOException {
     return findMidPoint(fs, acuConf, prevEndRow, endRow, mapFiles, minSplit, true);
   }
-  
+
   public static double estimatePercentageLTE(VolumeManager fs, AccumuloConfiguration acuconf, Text prevEndRow, Text endRow, Collection<String> mapFiles,
       Text splitRow) throws IOException {
-    
+
     Configuration conf = CachedConfiguration.getInstance();
-    
+
     Path tmpDir = null;
-    
+
     int maxToOpen = acuconf.getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
     ArrayList<FileSKVIterator> readers = new ArrayList<FileSKVIterator>(mapFiles.size());
-    
+
     try {
       if (mapFiles.size() > maxToOpen) {
         tmpDir = createTmpDir(acuconf, fs);
-        
+
         log.debug("Too many indexes (" + mapFiles.size() + ") to open at once for " + endRow + " " + prevEndRow + ", reducing in tmpDir = " + tmpDir);
-        
+
         long t1 = System.currentTimeMillis();
         mapFiles = reduceFiles(acuconf, conf, fs, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
         long t2 = System.currentTimeMillis();
-        
+
         log.debug("Finished reducing indexes for " + endRow + " " + prevEndRow + " in " + String.format("%6.2f secs", (t2 - t1) / 1000.0));
       }
-      
+
       if (prevEndRow == null)
         prevEndRow = new Text();
-      
+
       long numKeys = 0;
-      
+
       numKeys = countIndexEntries(acuconf, prevEndRow, endRow, mapFiles, true, conf, fs, readers);
-      
+
       if (numKeys == 0) {
         // not enough info in the index to answer the question, so instead of going to
         // the data just punt and return .5
         return .5;
       }
-      
+
       List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(readers);
       MultiIterator mmfi = new MultiIterator(iters, true);
-      
+
       // skip the prevendrow
       while (mmfi.hasTop() && mmfi.getTopKey().compareRow(prevEndRow) <= 0) {
         mmfi.next();
       }
-      
+
       int numLte = 0;
-      
+
       while (mmfi.hasTop() && mmfi.getTopKey().compareRow(splitRow) <= 0) {
         numLte++;
         mmfi.next();
       }
-      
+
       if (numLte > numKeys) {
         // something went wrong
         throw new RuntimeException("numLte > numKeys " + numLte + " " + numKeys + " " + prevEndRow + " " + endRow + " " + splitRow + " " + mapFiles);
       }
-      
+
       // do not want to return 0% or 100%, so add 1 and 2 below
       return (numLte + 1) / (double) (numKeys + 2);
-      
+
     } finally {
       cleanupIndexOp(acuconf, tmpDir, fs, readers);
     }
   }
-  
+
   /**
-   * 
+   *
    * @param mapFiles
    *          - list MapFiles to find the mid point key
-   * 
+   *
    *          ISSUES : This method used the index files to find the mid point. If the map files have different index intervals this method will not return an
    *          accurate mid point. Also, it would be tricky to use this method in conjunction with an in memory map because the indexing interval is unknown.
    */
   public static SortedMap<Double,Key> findMidPoint(VolumeManager fs, AccumuloConfiguration acuConf, Text prevEndRow, Text endRow, Collection<String> mapFiles,
       double minSplit, boolean useIndex) throws IOException {
     Configuration conf = CachedConfiguration.getInstance();
-    
+
     Collection<String> origMapFiles = mapFiles;
-    
+
     Path tmpDir = null;
-    
+
     int maxToOpen = acuConf.getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
     ArrayList<FileSKVIterator> readers = new ArrayList<FileSKVIterator>(mapFiles.size());
-    
+
     try {
       if (mapFiles.size() > maxToOpen) {
         if (!useIndex)
           throw new IOException("Cannot find mid point using data files, too many " + mapFiles.size());
         tmpDir = createTmpDir(acuConf, fs);
-        
+
         log.debug("Too many indexes (" + mapFiles.size() + ") to open at once for " + endRow + " " + prevEndRow + ", reducing in tmpDir = " + tmpDir);
-        
+
         long t1 = System.currentTimeMillis();
         mapFiles = reduceFiles(acuConf, conf, fs, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
         long t2 = System.currentTimeMillis();
-        
+
         log.debug("Finished reducing indexes for " + endRow + " " + prevEndRow + " in " + String.format("%6.2f secs", (t2 - t1) / 1000.0));
       }
-      
+
       if (prevEndRow == null)
         prevEndRow = new Text();
-      
+
       long t1 = System.currentTimeMillis();
-      
+
       long numKeys = 0;
-      
+
       numKeys = countIndexEntries(acuConf, prevEndRow, endRow, mapFiles, tmpDir == null ? useIndex : false, conf, fs, readers);
-      
+
       if (numKeys == 0) {
         if (useIndex) {
           log.warn("Failed to find mid point using indexes, falling back to data files which is slower. No entries between " + prevEndRow + " and " + endRow
@@ -309,48 +309,48 @@ public class FileUtil {
         }
         throw new IOException("Failed to find mid point, no entries between " + prevEndRow + " and " + endRow + " for " + mapFiles);
       }
-      
+
       List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(readers);
       MultiIterator mmfi = new MultiIterator(iters, true);
-      
+
       // skip the prevendrow
       while (mmfi.hasTop() && mmfi.getTopKey().compareRow(prevEndRow) <= 0)
         mmfi.next();
-      
+
       // read half of the keys in the index
       TreeMap<Double,Key> ret = new TreeMap<Double,Key>();
       Key lastKey = null;
       long keysRead = 0;
-      
+
       Key keyBeforeMidPoint = null;
       long keyBeforeMidPointPosition = 0;
-      
+
       while (keysRead < numKeys / 2) {
         if (lastKey != null && !lastKey.equals(mmfi.getTopKey(), PartialKey.ROW) && (keysRead - 1) / (double) numKeys >= minSplit) {
           keyBeforeMidPoint = new Key(lastKey);
           keyBeforeMidPointPosition = keysRead - 1;
         }
-        
+
         if (lastKey == null)
           lastKey = new Key();
-        
+
         lastKey.set(mmfi.getTopKey());
-        
+
         keysRead++;
-        
+
         // consume minimum
         mmfi.next();
       }
-      
+
       if (keyBeforeMidPoint != null)
         ret.put(keyBeforeMidPointPosition / (double) numKeys, keyBeforeMidPoint);
-      
+
       long t2 = System.currentTimeMillis();
-      
+
       log.debug(String.format("Found midPoint from indexes in %6.2f secs.%n", ((t2 - t1) / 1000.0)));
-      
+
       ret.put(.5, mmfi.getTopKey());
-      
+
       // sanity check
       for (Key key : ret.values()) {
         boolean inRange = (key.compareRow(prevEndRow) > 0 && (endRow == null || key.compareRow(endRow) < 1));
@@ -358,7 +358,7 @@ public class FileUtil {
           throw new IOException("Found mid point is not in range " + key + " " + prevEndRow + " " + endRow + " " + mapFiles);
         }
       }
-      
+
       return ret;
     } finally {
       cleanupIndexOp(acuConf, tmpDir, fs, readers);
@@ -390,9 +390,9 @@ public class FileUtil {
 
   private static long countIndexEntries(AccumuloConfiguration acuConf, Text prevEndRow, Text endRow, Collection<String> mapFiles, boolean useIndex,
       Configuration conf, VolumeManager fs, ArrayList<FileSKVIterator> readers) throws IOException {
-    
+
     long numKeys = 0;
-    
+
     // count the total number of index entries
     for (String ref : mapFiles) {
       FileSKVIterator reader = null;
@@ -402,16 +402,16 @@ public class FileUtil {
         if (useIndex)
           reader = FileOperations.getInstance().openIndex(path.toString(), ns, ns.getConf(), acuConf);
         else
-          reader = FileOperations.getInstance().openReader(path.toString(), new Range(prevEndRow, false, null, true), LocalityGroupUtil.EMPTY_CF_SET, false, ns, ns.getConf(),
-              acuConf);
-        
+          reader = FileOperations.getInstance().openReader(path.toString(), new Range(prevEndRow, false, null, true), LocalityGroupUtil.EMPTY_CF_SET, false,
+              ns, ns.getConf(), acuConf);
+
         while (reader.hasTop()) {
           Key key = reader.getTopKey();
           if (endRow != null && key.compareRow(endRow) > 0)
             break;
           else if (prevEndRow == null || key.compareRow(prevEndRow) > 0)
             numKeys++;
-          
+
           reader.next();
         }
       } finally {
@@ -422,35 +422,35 @@ public class FileUtil {
           log.error(e, e);
         }
       }
-      
+
       if (useIndex)
         readers.add(FileOperations.getInstance().openIndex(path.toString(), ns, ns.getConf(), acuConf));
       else
-        readers.add(FileOperations.getInstance().openReader(path.toString(), new Range(prevEndRow, false, null, true), LocalityGroupUtil.EMPTY_CF_SET, false, ns, ns.getConf(),
-            acuConf));
-      
+        readers.add(FileOperations.getInstance().openReader(path.toString(), new Range(prevEndRow, false, null, true), LocalityGroupUtil.EMPTY_CF_SET, false,
+            ns, ns.getConf(), acuConf));
+
     }
     return numKeys;
   }
-  
+
   public static Map<FileRef,FileInfo> tryToGetFirstAndLastRows(VolumeManager fs, AccumuloConfiguration acuConf, Set<FileRef> mapfiles) {
-    
+
     HashMap<FileRef,FileInfo> mapFilesInfo = new HashMap<FileRef,FileInfo>();
-    
+
     long t1 = System.currentTimeMillis();
-    
+
     for (FileRef mapfile : mapfiles) {
-      
+
       FileSKVIterator reader = null;
       FileSystem ns = fs.getVolumeByPath(mapfile.path()).getFileSystem();
       try {
         reader = FileOperations.getInstance().openReader(mapfile.toString(), false, ns, ns.getConf(), acuConf);
-        
+
         Key firstKey = reader.getFirstKey();
         if (firstKey != null) {
           mapFilesInfo.put(mapfile, new FileInfo(firstKey, reader.getLastKey()));
         }
-        
+
       } catch (IOException ioe) {
         log.warn("Failed to read map file to determine first and last key : " + mapfile, ioe);
       } finally {
@@ -462,31 +462,31 @@ public class FileUtil {
           }
         }
       }
-      
+
     }
-    
+
     long t2 = System.currentTimeMillis();
-    
+
     log.debug(String.format("Found first and last keys for %d map files in %6.2f secs", mapfiles.size(), (t2 - t1) / 1000.0));
-    
+
     return mapFilesInfo;
   }
-  
+
   public static WritableComparable<Key> findLastKey(VolumeManager fs, AccumuloConfiguration acuConf, Collection<FileRef> mapFiles) throws IOException {
     Key lastKey = null;
-    
+
     for (FileRef ref : mapFiles) {
       Path path = ref.path();
       FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
       FileSKVIterator reader = FileOperations.getInstance().openReader(path.toString(), true, ns, ns.getConf(), acuConf);
-      
+
       try {
         if (!reader.hasTop())
           // file is empty, so there is no last key
           continue;
-        
+
         Key key = reader.getLastKey();
-        
+
         if (lastKey == null || key.compareTo(lastKey) > 0)
           lastKey = key;
       } finally {
@@ -498,41 +498,41 @@ public class FileUtil {
         }
       }
     }
-    
+
     return lastKey;
-    
+
   }
-  
+
   private static class MLong {
     public MLong(long i) {
       l = i;
     }
-    
+
     long l;
   }
-  
+
   public static Map<KeyExtent,Long> estimateSizes(AccumuloConfiguration acuConf, Path mapFile, long fileSize, List<KeyExtent> extents, Configuration conf,
       VolumeManager fs) throws IOException {
-    
+
     long totalIndexEntries = 0;
     Map<KeyExtent,MLong> counts = new TreeMap<KeyExtent,MLong>();
     for (KeyExtent keyExtent : extents)
       counts.put(keyExtent, new MLong(0));
-    
+
     Text row = new Text();
     FileSystem ns = fs.getVolumeByPath(mapFile).getFileSystem();
     FileSKVIterator index = FileOperations.getInstance().openIndex(mapFile.toString(), ns, ns.getConf(), acuConf);
-    
+
     try {
       while (index.hasTop()) {
         Key key = index.getTopKey();
         totalIndexEntries++;
         key.getRow(row);
-        
+
         for (Entry<KeyExtent,MLong> entry : counts.entrySet())
           if (entry.getKey().contains(row))
             entry.getValue().l++;
-        
+
         index.next();
       }
     } finally {
@@ -544,7 +544,7 @@ public class FileUtil {
         log.error(e, e);
       }
     }
-    
+
     Map<KeyExtent,Long> results = new TreeMap<KeyExtent,Long>();
     for (KeyExtent keyExtent : extents) {
       double numEntries = counts.get(keyExtent).l;
@@ -555,7 +555,7 @@ public class FileUtil {
     }
     return results;
   }
-  
+
   public static Collection<String> toPathStrings(Collection<FileRef> refs) {
     ArrayList<String> ret = new ArrayList<String>();
     for (FileRef fileRef : refs) {
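
Beyond the whitespace cleanup, the createTmpDir hunk above preserves a subtle bit of reasoning: in some Hadoop versions two clients racing on mkdirs() for the same path can both see success, so the directory only counts as reserved once a marker file has been created inside it, because file creation is atomic. Below is a minimal standalone sketch of that reservation pattern against the plain Hadoop FileSystem API; the class name and the "_reserved" marker name are assumptions for illustration, since the actual marker file name falls outside the hunks shown here.

  import java.io.IOException;
  import java.util.Random;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Illustrative sketch only: reserve a unique tmp directory by creating a
  // marker file inside it, since FileSystem.mkdirs() may report success to
  // two concurrent callers racing on the same path.
  public class TmpDirReservationSketch {
    public static Path reserveTmpDir(FileSystem fs, Path baseDir) throws IOException {
      Random rand = new Random();
      while (true) {
        Path candidate = new Path(baseDir, String.format("idxReduce_%09d", rand.nextInt(Integer.MAX_VALUE)));
        if (fs.exists(candidate))
          continue; // name already taken, pick another
        fs.mkdirs(candidate);
        // createNewFile is atomic: only one caller wins the marker, so only
        // one caller treats the directory as its own.
        if (fs.createNewFile(new Path(candidate, "_reserved")))
          return candidate;
      }
    }

    public static void main(String[] args) throws IOException {
      FileSystem fs = FileSystem.get(new Configuration());
      Path tmp = reserveTmpDir(fs, new Path("/tmp"));
      System.out.println("Reserved " + tmp);
    }
  }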

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java b/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
index 82eb639..aa45bff 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Halt.java
@@ -22,7 +22,7 @@ import org.apache.log4j.Logger;
 
 public class Halt {
   static private final Logger log = Logger.getLogger(Halt.class);
-  
+
   public static void halt(final String msg) {
     halt(0, new Runnable() {
       public void run() {
@@ -30,7 +30,7 @@ public class Halt {
       }
     });
   }
-  
+
   public static void halt(final String msg, int status) {
     halt(status, new Runnable() {
       public void run() {
@@ -38,7 +38,7 @@ public class Halt {
       }
     });
   }
-  
+
   public static void halt(final int status, Runnable runnable) {
     try {
       // give ourselves a little time to try and do something
@@ -48,7 +48,7 @@ public class Halt {
           Runtime.getRuntime().halt(status);
         }
       }.start();
-      
+
       if (runnable != null)
         runnable.run();
       Runtime.getRuntime().halt(status);
@@ -57,5 +57,5 @@ public class Halt {
       Runtime.getRuntime().halt(-1);
     }
   }
-  
+
 }
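
The Halt.halt pattern reformatted above is also worth a note: it first arms a separate thread that will call Runtime.halt after a short delay, then runs the optional cleanup Runnable, so the process gets "a little time to try and do something" but cannot be kept alive by a hung cleanup. A rough standalone sketch of the same idea using a plain daemon thread follows; the 100 ms grace period and the class name are illustrative assumptions, not values taken from the commit.

  // Illustrative sketch only: run a best-effort cleanup action, but guarantee
  // the JVM halts even if that action hangs, by arming a watchdog thread first.
  public class HaltSketch {
    public static void haltWithCleanup(final int status, Runnable cleanup) {
      Thread watchdog = new Thread() {
        @Override
        public void run() {
          try {
            Thread.sleep(100); // small grace period for the cleanup below
          } catch (InterruptedException e) {
            // ignore: we are about to halt anyway
          }
          Runtime.getRuntime().halt(status);
        }
      };
      watchdog.setDaemon(true);
      watchdog.start();

      try {
        if (cleanup != null)
          cleanup.run();
      } finally {
        Runtime.getRuntime().halt(status);
      }
    }

    public static void main(String[] args) {
      haltWithCleanup(1, new Runnable() {
        public void run() {
          System.err.println("halting: example cleanup message");
        }
      });
    }
  }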

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
index 8550d0b..c8b8dff 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListInstances.java
@@ -38,39 +38,40 @@ import org.apache.log4j.Logger;
 import com.beust.jcommander.Parameter;
 
 public class ListInstances {
-  
+
   private static final Logger log = Logger.getLogger(ListInstances.class);
-  
+
   private static final int NAME_WIDTH = 20;
   private static final int UUID_WIDTH = 37;
   private static final int MASTER_WIDTH = 30;
-  
+
   private static final int ZOOKEEPER_TIMER_MILLIS = 30 * 1000;
-  
+
   static class Opts extends Help {
-    @Parameter(names="--print-errors", description="display errors while listing instances")
+    @Parameter(names = "--print-errors", description = "display errors while listing instances")
     boolean printErrors = false;
-    @Parameter(names="--print-all", description="print information for all instances, not just those with names")
+    @Parameter(names = "--print-all", description = "print information for all instances, not just those with names")
     boolean printAll = false;
-    @Parameter(names={"-z", "--zookeepers"}, description="the zookeepers to contact")
+    @Parameter(names = {"-z", "--zookeepers"}, description = "the zookeepers to contact")
     String keepers = null;
   }
+
   static Opts opts = new Opts();
   static int errors = 0;
-  
+
   public static void main(String[] args) {
     opts.parseArgs(ListInstances.class.getName(), args);
-    
+
     if (opts.keepers == null) {
       opts.keepers = SiteConfiguration.getInstance().get(Property.INSTANCE_ZK_HOST);
     }
-    
+
     String keepers = opts.keepers;
     boolean printAll = opts.printAll;
     boolean printErrors = opts.printErrors;
-    
+
     listInstances(keepers, printAll, printErrors);
-    
+
   }
 
   static synchronized void listInstances(String keepers, boolean printAll, boolean printErrors) {
@@ -81,17 +82,17 @@ public class ListInstances {
     ZooCache cache = new ZooCache(keepers, ZOOKEEPER_TIMER_MILLIS);
 
     TreeMap<String,UUID> instanceNames = getInstanceNames(rdr, printErrors);
-    
+
     System.out.println();
     printHeader();
-    
+
     for (Entry<String,UUID> entry : instanceNames.entrySet()) {
       printInstanceInfo(cache, entry.getKey(), entry.getValue(), printErrors);
     }
-    
+
     TreeSet<UUID> instancedIds = getInstanceIDs(rdr, printErrors);
     instancedIds.removeAll(instanceNames.values());
-    
+
     if (printAll) {
       for (UUID uuid : instancedIds) {
         printInstanceInfo(cache, null, uuid, printErrors);
@@ -102,57 +103,57 @@ public class ListInstances {
     } else {
       System.out.println();
     }
-    
+
     if (!printErrors && errors > 0) {
       System.err.println("WARN : There were " + errors + " errors, run with --print-errors to see more info");
     }
   }
-  
+
   private static class CharFiller implements Formattable {
-    
+
     char c;
-    
+
     CharFiller(char c) {
       this.c = c;
     }
-    
+
     @Override
     public void formatTo(Formatter formatter, int flags, int width, int precision) {
-      
+
       StringBuilder sb = new StringBuilder();
       for (int i = 0; i < width; i++)
         sb.append(c);
       formatter.format(sb.toString());
     }
-    
+
   }
-  
+
   private static void printHeader() {
     System.out.printf(" %-" + NAME_WIDTH + "s| %-" + UUID_WIDTH + "s| %-" + MASTER_WIDTH + "s%n", "Instance Name", "Instance ID", "Master");
     System.out.printf("%" + (NAME_WIDTH + 1) + "s+%" + (UUID_WIDTH + 1) + "s+%" + (MASTER_WIDTH + 1) + "s%n", new CharFiller('-'), new CharFiller('-'),
         new CharFiller('-'));
-    
+
   }
-  
+
   private static void printInstanceInfo(ZooCache cache, String instanceName, UUID iid, boolean printErrors) {
     String master = getMaster(cache, iid, printErrors);
     if (instanceName == null) {
       instanceName = "";
     }
-    
+
     if (master == null) {
       master = "";
     }
-    
+
     System.out.printf("%" + NAME_WIDTH + "s |%" + UUID_WIDTH + "s |%" + MASTER_WIDTH + "s%n", "\"" + instanceName + "\"", iid, master);
   }
-  
+
   private static String getMaster(ZooCache cache, UUID iid, boolean printErrors) {
-    
+
     if (iid == null) {
       return null;
     }
-    
+
     try {
       String masterLocPath = Constants.ZROOT + "/" + iid + Constants.ZMASTER_LOCK;
       byte[] master = ZooLock.getLockData(cache, masterLocPath, null);
@@ -165,22 +166,22 @@ public class ListInstances {
       return null;
     }
   }
-  
+
   private static TreeMap<String,UUID> getInstanceNames(ZooReader zk, boolean printErrors) {
-    
+
     String instancesPath = Constants.ZROOT + Constants.ZINSTANCES;
-    
+
     TreeMap<String,UUID> tm = new TreeMap<String,UUID>();
-    
+
     List<String> names;
-    
+
     try {
       names = zk.getChildren(instancesPath);
     } catch (Exception e) {
       handleException(e, printErrors);
       return tm;
     }
-    
+
     for (String name : names) {
       String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
       try {
@@ -191,16 +192,16 @@ public class ListInstances {
         tm.put(name, null);
       }
     }
-    
+
     return tm;
   }
-  
+
   private static TreeSet<UUID> getInstanceIDs(ZooReader zk, boolean printErrors) {
     TreeSet<UUID> ts = new TreeSet<UUID>();
-    
+
     try {
       List<String> children = zk.getChildren(Constants.ZROOT);
-      
+
       for (String iid : children) {
         if (iid.equals("instances"))
           continue;
@@ -213,15 +214,15 @@ public class ListInstances {
     } catch (Exception e) {
       handleException(e, printErrors);
     }
-    
+
     return ts;
   }
-  
+
   private static void handleException(Exception e, boolean printErrors) {
     if (printErrors) {
       log.error(e);
     }
-    
+
     errors++;
   }
 }
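
The CharFiller helper reformatted above relies on a small java.util.Formattable trick: when an object implementing Formattable is passed to a %s conversion, formatTo receives the requested field width, so a single character can be stretched into a separator exactly as wide as the header column. A self-contained sketch of that trick is below; the printed label and the width mirror the NAME_WIDTH header above but are simplified for illustration.

  import java.util.Formattable;
  import java.util.Formatter;

  // Illustrative sketch only: a Formattable that repeats one character to fill
  // the width given in the format specifier, handy for printf-drawn separators.
  public class CharFillerSketch implements Formattable {
    private final char c;

    public CharFillerSketch(char c) {
      this.c = c;
    }

    @Override
    public void formatTo(Formatter formatter, int flags, int width, int precision) {
      StringBuilder sb = new StringBuilder();
      for (int i = 0; i < width; i++)
        sb.append(c);
      formatter.format(sb.toString());
    }

    public static void main(String[] args) {
      // Prints a 20-character column header followed by a matching dashed rule.
      System.out.printf(" %-20s|%n", "Instance Name");
      System.out.printf("%21s+%n", new CharFillerSketch('-'));
    }
  }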

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
index b288aac..e90d1dd 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
@@ -37,7 +37,7 @@ import org.apache.accumulo.server.fs.VolumeManager.FileType;
 import org.apache.hadoop.fs.Path;
 
 /**
- * 
+ *
  */
 public class ListVolumesUsed {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java b/server/base/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
index 35bb8f5..5d49fa7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
@@ -41,22 +41,22 @@ import org.apache.hadoop.fs.Path;
 import com.google.common.net.HostAndPort;
 
 public class LocalityCheck {
-  
+
   public int run(String[] args) throws Exception {
     ClientOpts opts = new ClientOpts();
     opts.parseArgs(LocalityCheck.class.getName(), args);
-    
+
     VolumeManager fs = VolumeManagerImpl.get();
     Connector connector = opts.getConnector();
     Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
     scanner.setRange(MetadataSchema.TabletsSection.getRange());
-    
+
     Map<String,Long> totalBlocks = new HashMap<String,Long>();
     Map<String,Long> localBlocks = new HashMap<String,Long>();
     ArrayList<String> files = new ArrayList<String>();
-    
+
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
       if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
@@ -66,7 +66,7 @@ public class LocalityCheck {
         addBlocks(fs, host, files, totalBlocks, localBlocks);
         files.clear();
       } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
-        
+
         files.add(fs.getFullPath(key).toString());
       }
     }
@@ -78,7 +78,7 @@ public class LocalityCheck {
     }
     return 0;
   }
-  
+
   private void addBlocks(VolumeManager fs, String host, ArrayList<String> files, Map<String,Long> totalBlocks, Map<String,Long> localBlocks) throws Exception {
     long allBlocks = 0;
     long matchingBlocks = 0;
@@ -105,7 +105,7 @@ public class LocalityCheck {
     totalBlocks.put(host, allBlocks + totalBlocks.get(host));
     localBlocks.put(host, matchingBlocks + localBlocks.get(host));
   }
-  
+
   public static void main(String[] args) throws Exception {
     LocalityCheck check = new LocalityCheck();
     System.exit(check.run(args));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
index 241eef3..be5a7c8 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
@@ -30,29 +30,29 @@ import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 
 /**
- * 
+ *
  */
 public class LoginProperties {
-  
+
   public static void main(String[] args) throws Exception {
     AccumuloConfiguration config = new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getConfiguration();
     Authenticator authenticator = AccumuloVFSClassLoader.getClassLoader().loadClass(config.get(Property.INSTANCE_SECURITY_AUTHENTICATOR))
         .asSubclass(Authenticator.class).newInstance();
-    
+
     List<Set<TokenProperty>> tokenProps = new ArrayList<Set<TokenProperty>>();
-    
+
     for (Class<? extends AuthenticationToken> tokenType : authenticator.getSupportedTokenTypes()) {
       tokenProps.add(tokenType.newInstance().getProperties());
     }
-    
+
     System.out.println("Supported token types for " + authenticator.getClass().getName() + " are : ");
     for (Class<? extends AuthenticationToken> tokenType : authenticator.getSupportedTokenTypes()) {
       System.out.println("\t" + tokenType.getName() + ", which accepts the following properties : ");
-      
+
       for (TokenProperty tokenProperty : tokenType.newInstance().getProperties()) {
         System.out.println("\t\t" + tokenProperty);
       }
-      
+
       System.out.println();
     }
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index 88316c6..80a6734 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -60,94 +60,94 @@ import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
 /**
- * 
+ *
  */
 public class MasterMetadataUtil {
-  
+
   private static final Logger log = Logger.getLogger(MasterMetadataUtil.class);
-  
-  public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location,
-      Map<FileRef,DataFileValue> datafileSizes, Map<FileRef,Long> bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
+
+  public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
+      Map<FileRef,Long> bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation();
-    
+
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
     TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
     if (lastFlushID > 0)
       TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
     if (lastCompactID > 0)
       TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
-    
+
     if (location != null) {
       location.putLocation(m);
       location.clearFutureLocation(m);
     }
-    
+
     for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
       m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
     }
-    
+
     for (Entry<FileRef,Long> entry : bulkLoadedFiles.entrySet()) {
       byte[] tidBytes = Long.toString(entry.getValue()).getBytes();
       m.put(TabletsSection.BulkFileColumnFamily.NAME, entry.getKey().meta(), new Value(tidBytes));
     }
-    
+
     MetadataTableUtil.update(context, zooLock, m, extent);
   }
-  
+
   public static KeyExtent fixSplit(ClientContext context, Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, ZooLock lock)
       throws AccumuloException, IOException {
     log.info("Incomplete split " + metadataEntry + " attempting to fix");
-    
+
     Value oper = columns.get(TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN);
-    
+
     if (columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN) == null) {
       throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
     }
-    
+
     double splitRatio = Double.parseDouble(new String(columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN).get(), UTF_8));
-    
+
     Value prevEndRowIBW = columns.get(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
-    
+
     if (prevEndRowIBW == null) {
       throw new IllegalArgumentException("Metadata entry does not have prev row (" + metadataEntry + ")");
     }
-    
+
     Value time = columns.get(TabletsSection.ServerColumnFamily.TIME_COLUMN);
-    
+
     if (time == null) {
       throw new IllegalArgumentException("Metadata entry does not have time (" + metadataEntry + ")");
     }
-    
+
     Value flushID = columns.get(TabletsSection.ServerColumnFamily.FLUSH_COLUMN);
     long initFlushID = -1;
     if (flushID != null)
       initFlushID = Long.parseLong(flushID.toString());
-    
+
     Value compactID = columns.get(TabletsSection.ServerColumnFamily.COMPACT_COLUMN);
     long initCompactID = -1;
     if (compactID != null)
       initCompactID = Long.parseLong(compactID.toString());
-    
+
     Text metadataPrevEndRow = KeyExtent.decodePrevEndRow(prevEndRowIBW);
-    
+
     Text table = (new KeyExtent(metadataEntry, (Text) null)).getTableId();
-    
+
     return fixSplit(context, table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, time.toString(), initFlushID, initCompactID, lock);
   }
-  
+
   private static KeyExtent fixSplit(ClientContext context, Text table, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio,
       TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
     if (metadataPrevEndRow == null)
       // something is wrong, this should not happen... if a tablet is split, it will always have a
       // prev end row....
       throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
-    
+
     // check to see if prev tablet exist in metadata tablet
     Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
-    
+
     ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
     scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
-    
+
     VolumeManager fs = VolumeManagerImpl.get();
     if (!scanner2.iterator().hasNext()) {
       log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
@@ -155,34 +155,34 @@ public class MasterMetadataUtil {
       return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
     } else {
       log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
-      
+
       List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
-      
+
       Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
       Key rowKey = new Key(metadataEntry);
-      
+
       SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
       scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
-      
+
       for (Entry<Key,Value> entry : scanner3) {
         if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
           origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
         }
       }
-      
+
       MetadataTableUtil.splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes,
           highDatafileSizes, highDatafilesToRemove);
-      
+
       MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
-      
+
       return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
     }
-    
+
   }
-  
+
   private static TServerInstance getTServerInstance(String address, ZooLock zooLock) {
     while (true) {
       try {
@@ -195,51 +195,51 @@ public class MasterMetadataUtil {
       UtilWaitThread.sleep(1000);
     }
   }
-  
+
   public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path,
       Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock) throws IOException {
     replaceDatafiles(context, extent, datafilesToDelete, scanFiles, path, compactionId, size, address, lastLocation, zooLock, true);
   }
-  
+
   public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path,
       Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
-    
+
     if (insertDeleteFlags) {
       // add delete flags for those paths before the data file reference is removed
       MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context);
     }
-    
+
     // replace data file references to old mapfiles with the new mapfiles
     Mutation m = new Mutation(extent.getMetadataEntry());
-    
+
     for (FileRef pathToRemove : datafilesToDelete)
       m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
-    
+
     for (FileRef scanFile : scanFiles)
       m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
-    
+
     if (size.getNumEntries() > 0)
       m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
-    
+
     if (compactionId != null)
       TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
-    
+
     TServerInstance self = getTServerInstance(address, zooLock);
     self.putLastLocation(m);
-    
+
     // remove the old location
     if (lastLocation != null && !lastLocation.equals(self))
       lastLocation.clearLastLocation(m);
-    
+
     MetadataTableUtil.update(context, zooLock, m, extent);
   }
-  
+
   /**
    * new data file update function adds one data file to a tablet's list
-   * 
+   *
    * @param path
    *          should be relative to the table directory
-   * 
+   *
    */
   public static void updateTabletDataFile(ClientContext context, KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time,
       Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
@@ -292,6 +292,7 @@ public class MasterMetadataUtil {
 
   /**
    * Create an update that updates a tablet
+   *
    * @return A Mutation to update a tablet from the given information
    */
   protected static Mutation getUpdateForTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time,
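
A pattern that shows through the rewrapped hunks above is that each metadata change is packed into a single Mutation on the tablet's row: old data-file columns are deleted, new data-file and scan-file columns are added, and the result is submitted as one update. A rough sketch of that per-row column bookkeeping using the public Mutation API is below; the row, family, and qualifier values are placeholders for illustration, not the real metadata schema constants.

  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.Value;
  import org.apache.hadoop.io.Text;

  // Illustrative sketch only: one mutation that swaps old data-file references
  // for a newly compacted file on a single (placeholder) tablet row.
  public class ReplaceDatafilesSketch {
    public static Mutation buildReplacement(String tabletRow, Iterable<String> oldFiles, String newFile, long sizeEntries) {
      Mutation m = new Mutation(new Text(tabletRow));
      Text fileFamily = new Text("file"); // placeholder for DataFileColumnFamily.NAME
      for (String old : oldFiles) {
        m.putDelete(fileFamily, new Text(old));
      }
      if (sizeEntries > 0) {
        m.put(fileFamily, new Text(newFile), new Value(Long.toString(sizeEntries).getBytes()));
      }
      return m;
    }

    public static void main(String[] args) {
      Mutation m = buildReplacement("2;row", java.util.Arrays.asList("/t-1/A.rf", "/t-1/B.rf"), "/t-1/C.rf", 42);
      System.out.println("mutation has " + m.getUpdates().size() + " column updates");
    }
  }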

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 524abb0..ed7626e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -182,8 +182,7 @@ public class MetadataTableUtil {
     }
   }
 
-  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, ClientContext context,
-      ZooLock zooLock) {
+  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
 
@@ -506,8 +505,8 @@ public class MetadataTableUtil {
     }
   }
 
-  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent)
-      throws KeeperException, InterruptedException, IOException {
+  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException,
+      InterruptedException, IOException {
     ArrayList<LogEntry> result = new ArrayList<LogEntry>();
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
 
@@ -891,8 +890,8 @@ public class MetadataTableUtil {
       Key k = entry.getKey();
       Mutation m = new Mutation(k.getRow());
       m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
-      String dir = volumeManager.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
-          + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES));
+      String dir = volumeManager.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId
+          + Path.SEPARATOR + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES));
       TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes(UTF_8)));
 
       bw.addMutation(m);
@@ -983,8 +982,8 @@ public class MetadataTableUtil {
    * During an upgrade from 1.6 to 1.7, we need to add the replication table
    */
   public static void createReplicationTable(ClientContext context) throws IOException {
-    String dir = VolumeManagerImpl.get().choose(Optional.of(ReplicationTable.ID), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
-        + Constants.DEFAULT_TABLET_LOCATION;
+    String dir = VolumeManagerImpl.get().choose(Optional.of(ReplicationTable.ID), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
 
     Mutation m = new Mutation(new Text(KeyExtent.getMetadataEntry(new Text(ReplicationTable.ID), null)));
     m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0, new Value(dir.getBytes(UTF_8)));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
index 0666297..b3c198e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomWriter.java
@@ -31,26 +31,26 @@ import org.apache.log4j.Logger;
 import com.beust.jcommander.Parameter;
 
 public class RandomWriter {
-  
+
   private static String table_name = "test_write_table";
   private static int num_columns_per_row = 1;
   private static int num_payload_bytes = 1024;
   private static final Logger log = Logger.getLogger(RandomWriter.class);
-  
+
   public static class RandomMutationGenerator implements Iterable<Mutation>, Iterator<Mutation> {
     private long max_mutations;
     private int mutations_so_far = 0;
     private Random r = new Random();
     private static final Logger log = Logger.getLogger(RandomMutationGenerator.class);
-    
+
     public RandomMutationGenerator(long num_mutations) {
       max_mutations = num_mutations;
     }
-    
+
     public boolean hasNext() {
       return mutations_so_far < max_mutations;
     }
-    
+
     public Mutation next() {
       Text row_value = new Text(Long.toString(((r.nextLong() & 0x7fffffffffffffffl) / 177) % 100000000000l));
       Mutation m = new Mutation(row_value);
@@ -66,27 +66,31 @@ public class RandomWriter {
       }
       return m;
     }
-    
+
     public void remove() {
       mutations_so_far++;
     }
-    
+
     @Override
     public Iterator<Mutation> iterator() {
       return this;
     }
   }
+
   static class Opts extends ClientOnDefaultTable {
-    @Parameter(names="--count", description="number of mutations to write", required=true)
+    @Parameter(names = "--count", description = "number of mutations to write", required = true)
     long count;
-    
-    Opts(String table) { super(table); }
+
+    Opts(String table) {
+      super(table);
+    }
   }
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts(table_name);
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);
-    
+
     long start = System.currentTimeMillis();
     log.info("starting at " + start + " for user " + opts.principal);
     try {
@@ -100,9 +104,9 @@ public class RandomWriter {
       throw e;
     }
     long stop = System.currentTimeMillis();
-    
+
     log.info("stopping at " + stop);
     log.info("elapsed: " + (((double) stop - (double) start) / 1000.0));
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index de360fe..77ffca3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -112,7 +112,8 @@ public class RandomizeVolumes {
       Key key = entry.getKey();
       Mutation m = new Mutation(key.getRow());
 
-      final String newLocation = vm.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
+      final String newLocation = vm.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR
+          + tableId + Path.SEPARATOR + directory;
       m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
       if (log.isTraceEnabled()) {
         log.trace("Replacing " + oldLocation + " with " + newLocation);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
index d5b586c..b5cf510 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
@@ -57,7 +57,7 @@ import com.beust.jcommander.Parameter;
 
 /**
  * Remove file entries for map files that don't exist.
- * 
+ *
  */
 public class RemoveEntriesForMissingFiles {
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
index b08bf90..e5a2add 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RestoreZookeeper.java
@@ -40,17 +40,17 @@ import org.xml.sax.helpers.DefaultHandler;
 import com.beust.jcommander.Parameter;
 
 public class RestoreZookeeper {
-  
+
   private static class Restore extends DefaultHandler {
     IZooReaderWriter zk = null;
     Stack<String> cwd = new Stack<String>();
     boolean overwrite = false;
-    
+
     Restore(IZooReaderWriter zk, boolean overwrite) {
       this.zk = zk;
       this.overwrite = overwrite;
     }
-    
+
     @Override
     public void startElement(String uri, String localName, String name, Attributes attributes) throws SAXException {
       if ("node".equals(name)) {
@@ -73,12 +73,12 @@ public class RestoreZookeeper {
         create(root, "", UTF_8.name());
       }
     }
-    
+
     @Override
     public void endElement(String uri, String localName, String name) throws SAXException {
       cwd.pop();
     }
-    
+
     // assume UTF-8 if not "base64"
     private void create(String path, String value, String encoding) {
       byte[] data = value.getBytes(UTF_8);
@@ -97,7 +97,7 @@ public class RestoreZookeeper {
       }
     }
   }
-  
+
   static class Opts extends Help {
     @Parameter(names = {"-z", "--keepers"})
     String keepers = "localhost:2181";
@@ -106,17 +106,17 @@ public class RestoreZookeeper {
     @Parameter(names = "--file")
     String file;
   }
-  
+
   public static void main(String[] args) throws Exception {
     Logger.getRootLogger().setLevel(Level.WARN);
     Opts opts = new Opts();
     opts.parseArgs(RestoreZookeeper.class.getName(), args);
-    
+
     InputStream in = System.in;
     if (opts.file != null) {
       in = new FileInputStream(opts.file);
     }
-    
+
     SAXParserFactory factory = SAXParserFactory.newInstance();
     SAXParser parser = factory.newSAXParser();
     parser.parse(in, new Restore(ZooReaderWriter.getInstance(), opts.overwrite));
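
RestoreZookeeper's handler above keeps the current znode path on a Stack while SAX walks the dump, pushing on startElement and popping on endElement so each nested <node> knows its full path. A self-contained sketch of that path-tracking bookkeeping follows; the sample XML and the print statement stand in for the actual znode creation.

  import java.io.StringReader;
  import java.util.Stack;

  import javax.xml.parsers.SAXParser;
  import javax.xml.parsers.SAXParserFactory;

  import org.xml.sax.Attributes;
  import org.xml.sax.InputSource;
  import org.xml.sax.SAXException;
  import org.xml.sax.helpers.DefaultHandler;

  // Illustrative sketch only: track the current node path on a stack while SAX
  // walks a nested <node name="..."> dump, the same bookkeeping RestoreZookeeper
  // uses to know where to recreate each znode.
  public class PathTrackingHandlerSketch extends DefaultHandler {
    private final Stack<String> cwd = new Stack<String>();

    @Override
    public void startElement(String uri, String localName, String name, Attributes attributes) throws SAXException {
      if ("node".equals(name)) {
        String path = (cwd.isEmpty() ? "" : cwd.peek()) + "/" + attributes.getValue("name");
        cwd.push(path);
        System.out.println("would create " + path);
      } else {
        cwd.push(""); // placeholder for the dump's root element
      }
    }

    @Override
    public void endElement(String uri, String localName, String name) throws SAXException {
      cwd.pop();
    }

    public static void main(String[] args) throws Exception {
      String xml = "<dump><node name='a'><node name='b'/></node></dump>";
      SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
      parser.parse(new InputSource(new StringReader(xml)), new PathTrackingHandlerSketch());
    }
  }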

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java b/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
index 63f1343..2c192cf 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SendLogToChainsaw.java
@@ -254,7 +254,7 @@ public class SendLogToChainsaw extends XMLLayout {
   }
 
   /**
-   * 
+   *
    * @param args
    *          <ol>
    *          <li>path to log directory</li>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
index a376ed6..49a6971 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
@@ -38,14 +38,14 @@ public class SystemPropUtil {
       log.warn("Ignoring property {} it is null, an invalid format, or not capable of being changed in zookeeper", property);
       return false;
     }
-    
+
     // create the zk node for this property and set it's data to the specified value
     String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZCONFIG + "/" + property;
     ZooReaderWriter.getInstance().putPersistentData(zPath, value.getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
-    
+
     return true;
   }
-  
+
   public static void removeSystemProperty(String property) throws InterruptedException, KeeperException {
     String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZCONFIG + "/" + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.FAIL);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/TableInfoUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TableInfoUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/TableInfoUtil.java
index cc91ef3..6aa937f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TableInfoUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TableInfoUtil.java
@@ -25,10 +25,10 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 
 /**
- * 
+ *
  */
 public class TableInfoUtil {
-  
+
   public static void add(TableInfo total, TableInfo more) {
     if (total.minors == null)
       total.minors = new Compacting();
@@ -58,7 +58,7 @@ public class TableInfoUtil {
     total.queryByteRate += more.queryByteRate;
     total.scanRate += more.scanRate;
   }
-  
+
   public static TableInfo summarizeTableStats(TabletServerStatus status) {
     TableInfo summary = new TableInfo();
     summary.majors = new Compacting();
@@ -69,7 +69,7 @@ public class TableInfoUtil {
     }
     return summary;
   }
-  
+
   public static Map<String,Double> summarizeTableStats(MasterMonitorInfo mmi) {
     Map<String,Double> compactingByTable = new HashMap<String,Double>();
     if (mmi != null && mmi.tServerInfo != null) {
@@ -84,5 +84,5 @@ public class TableInfoUtil {
     }
     return compactingByTable;
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
index 5e6542d..ab14311 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
@@ -31,31 +31,31 @@ public class TablePropUtil {
   public static boolean setTableProperty(String tableId, String property, String value) throws KeeperException, InterruptedException {
     if (!isPropertyValid(property, value))
       return false;
-    
+
     // create the zk node for per-table properties for this table if it doesn't already exist
     String zkTablePath = getTablePath(tableId);
     ZooReaderWriter.getInstance().putPersistentData(zkTablePath, new byte[0], NodeExistsPolicy.SKIP);
-    
+
     // create the zk node for this property and set its data to the specified value
     String zPath = zkTablePath + "/" + property;
     ZooReaderWriter.getInstance().putPersistentData(zPath, value.getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
-    
+
     return true;
   }
-  
+
   public static boolean isPropertyValid(String property, String value) {
     Property p = Property.getPropertyByKey(property);
     if ((p != null && !p.getType().isValidFormat(value)) || !Property.isValidTablePropertyKey(property))
       return false;
-    
+
     return true;
   }
-  
+
   public static void removeTableProperty(String tableId, String property) throws InterruptedException, KeeperException {
     String zPath = getTablePath(tableId) + "/" + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.SKIP);
   }
-  
+
   private static String getTablePath(String tablename) {
     return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZTABLES + "/" + tablename + Constants.ZTABLE_CONF;
   }
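
TablePropUtil above validates a per-table property against Property's declared format and then writes it under the table's conf node in ZooKeeper. A short sketch of the validate/set/remove cycle; the table id "2" and the property chosen are purely illustrative.

import org.apache.accumulo.server.util.TablePropUtil;

public class TablePropExample {
  public static void main(String[] args) throws Exception {
    String tableId = "2"; // illustrative table id, not a real one
    String key = "table.split.threshold";
    String value = "256M";

    // isPropertyValid rejects keys that are not per-table properties and
    // values that do not match the property's declared format.
    if (TablePropUtil.isPropertyValid(key, value)) {
      // Creates .../tables/<tableId>/conf/<key> in ZooKeeper with the value.
      TablePropUtil.setTableProperty(tableId, key, value);
    }

    // Deleting the node falls back to the system-wide or site default.
    TablePropUtil.removeTableProperty(tableId, key);
  }
}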

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
index 8dd414b..3bc6c96 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
@@ -40,56 +40,52 @@ import org.apache.log4j.Logger;
  * This class iterates over the metadata table returning all key values for a tablet in one chunk. As it scans the metadata table it checks the correctness of
  * the metadata table, and rescans if needed. So the tablet key/values returned by this iterator should satisfy the sorted linked list property of the metadata
  * table.
- * 
+ *
  * The purpose of this is to hide inconsistencies caused by splits and detect anomalies in the metadata table.
- * 
+ *
  * If a tablet that was returned by this iterator is subsequently deleted from the metadata table, then this iterator will throw a TabletDeletedException. This
  * could occur when a table is merged.
- * 
- * 
+ *
+ *
  */
 public class TabletIterator implements Iterator<Map<Key,Value>> {
-  
+
   private static final Logger log = Logger.getLogger(TabletIterator.class);
-  
+
   private SortedMap<Key,Value> currentTabletKeys;
-  
+
   private Text lastTablet;
-  
+
   private Scanner scanner;
   private Iterator<Entry<Key,Value>> iter;
-  
+
   private boolean returnPrevEndRow;
-  
+
   private boolean returnDir;
-  
+
   private Range range;
-  
+
   public static class TabletDeletedException extends RuntimeException {
-    
-    /**
-		 * 
-		 */
-    
+
     private static final long serialVersionUID = 1L;
-    
+
     public TabletDeletedException(String msg) {
       super(msg);
     }
   }
-  
+
   /*
    * public TabletIterator(String table, boolean returnPrevEndRow){
-   * 
+   *
    * }
    */
-  
+
   /**
-   * 
+   *
    * @param s
   *          A scanner over the entire metadata table configured to fetch needed columns.
    */
-  
+
   public TabletIterator(Scanner s, Range range, boolean returnPrevEndRow, boolean returnDir) {
     this.scanner = s;
     this.range = range;
@@ -100,129 +96,129 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
     this.returnPrevEndRow = returnPrevEndRow;
     this.returnDir = returnDir;
   }
-  
+
   @Override
   public boolean hasNext() {
     while (currentTabletKeys == null) {
-      
+
       currentTabletKeys = scanToPrevEndRow();
       if (currentTabletKeys.size() == 0) {
         break;
       }
-      
+
       Key prevEndRowKey = currentTabletKeys.lastKey();
       Value prevEndRowValue = currentTabletKeys.get(prevEndRowKey);
-      
+
       if (!TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
         log.debug(currentTabletKeys);
         throw new RuntimeException("Unexpected key " + prevEndRowKey);
       }
-      
+
       Text per = KeyExtent.decodePrevEndRow(prevEndRowValue);
       Text lastEndRow;
-      
+
       if (lastTablet == null) {
         lastEndRow = null;
       } else {
         lastEndRow = new KeyExtent(lastTablet, (Text) null).getEndRow();
-        
+
         // do table transition sanity check
         String lastTable = new KeyExtent(lastTablet, (Text) null).getTableId().toString();
         String currentTable = new KeyExtent(prevEndRowKey.getRow(), (Text) null).getTableId().toString();
-        
+
         if (!lastTable.equals(currentTable) && (per != null || lastEndRow != null)) {
           log.info("Metadata inconsistency on table transition : " + lastTable + " " + currentTable + " " + per + " " + lastEndRow);
-          
+
           currentTabletKeys = null;
           resetScanner();
-          
+
           UtilWaitThread.sleep(250);
-          
+
           continue;
         }
       }
-      
+
       boolean perEqual = (per == null && lastEndRow == null) || (per != null && lastEndRow != null && per.equals(lastEndRow));
-      
+
       if (!perEqual) {
-        
+
         log.info("Metadata inconsistency : " + per + " != " + lastEndRow + " metadataKey = " + prevEndRowKey);
-        
+
         currentTabletKeys = null;
         resetScanner();
-        
+
         UtilWaitThread.sleep(250);
-        
+
         continue;
-        
+
       }
       // this tablet is good, so set it as the last tablet
       lastTablet = prevEndRowKey.getRow();
     }
-    
+
     return currentTabletKeys.size() > 0;
   }
-  
+
   @Override
   public Map<Key,Value> next() {
-    
+
     if (!hasNext())
       throw new NoSuchElementException();
-    
+
     Map<Key,Value> tmp = currentTabletKeys;
     currentTabletKeys = null;
-    
+
     Set<Entry<Key,Value>> es = tmp.entrySet();
     Iterator<Entry<Key,Value>> esIter = es.iterator();
-    
+
     while (esIter.hasNext()) {
       Map.Entry<Key,Value> entry = esIter.next();
       if (!returnPrevEndRow && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
-      
+
       if (!returnDir && TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
     }
-    
+
     return tmp;
   }
-  
+
   @Override
   public void remove() {
     throw new UnsupportedOperationException();
   }
-  
+
   private SortedMap<Key,Value> scanToPrevEndRow() {
-    
+
     Text curMetaDataRow = null;
-    
+
     TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
-    
+
     boolean sawPrevEndRow = false;
-    
+
     while (true) {
       while (iter.hasNext()) {
         Entry<Key,Value> entry = iter.next();
-        
+
         if (curMetaDataRow == null) {
           curMetaDataRow = entry.getKey().getRow();
         }
-        
+
         if (!curMetaDataRow.equals(entry.getKey().getRow())) {
           // tablet must not have a prev end row, try scanning again
           break;
         }
-        
+
         tm.put(entry.getKey(), entry.getValue());
-        
+
         if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
           sawPrevEndRow = true;
           break;
         }
       }
-      
+
       if (!sawPrevEndRow && tm.size() > 0) {
         log.warn("Metadata problem : tablet " + curMetaDataRow + " has no prev end row");
         resetScanner();
@@ -233,14 +229,14 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
         break;
       }
     }
-    
+
     return tm;
   }
-  
+
   protected void resetScanner() {
-    
+
     Range range;
-    
+
     if (lastTablet == null) {
       range = this.range;
     } else {
@@ -252,19 +248,19 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
       Entry<Key,Value> entry : scanner) {
         count++;
       }
-      
+
       if (count == 0)
         throw new TabletDeletedException("Tablet " + lastTablet + " was deleted while iterating");
-      
+
       // start right after the last good tablet
       range = new Range(new Key(lastTablet).followingKey(PartialKey.ROW), true, this.range.getEndKey(), this.range.isEndKeyInclusive());
     }
-    
+
     log.info("Resetting " + MetadataTable.NAME + " scanner to " + range);
-    
+
     scanner.setRange(range);
     iter = scanner.iterator();
-    
+
   }
-  
+
 }
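
TabletIterator above returns one Map<Key,Value> per tablet, rescanning the metadata table when the prev-end-row linked list looks inconsistent and throwing TabletDeletedException if a tablet it already returned disappears (e.g. after a merge). A minimal usage sketch; it assumes a Connector is available and that TabletsSection.getRange() and the ColumnFQ fetch helpers behave as in other Accumulo metadata utilities.

import java.util.Map;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.server.util.TabletIterator;
import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;

public class TabletIteratorExample {
  static void walkTablets(Connector conn) throws Exception {
    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    // Fetch the columns the iterator inspects (prev end row and directory).
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);

    Range allTablets = TabletsSection.getRange();
    TabletIterator ti = new TabletIterator(scanner, allTablets, true, true);

    try {
      while (ti.hasNext()) {
        Map<Key,Value> tablet = ti.next(); // every key/value for one tablet
        System.out.println("tablet with " + tablet.size() + " entries");
      }
    } catch (TabletDeletedException tde) {
      // A tablet returned earlier was deleted (e.g. by a merge); callers
      // typically restart their work from scratch.
    }
  }
}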

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
index c0e1a9b..b1e2d35 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
@@ -24,8 +24,8 @@ import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -33,15 +33,15 @@ import org.apache.log4j.Logger;
 import com.google.common.base.Optional;
 
 public class TabletOperations {
-  
+
   private static final Logger log = Logger.getLogger(TabletOperations.class);
-  
+
   public static String createTabletDirectory(VolumeManager fs, String tableId, Text endRow) {
     String lowDirectory;
-    
+
     UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
     String volume = fs.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
-    
+
     while (true) {
       try {
         if (endRow == null) {
@@ -54,7 +54,7 @@ public class TabletOperations {
           log.warn("Failed to create " + lowDirectoryPath + " for unknown reason");
         } else {
           lowDirectory = "/" + Constants.GENERATED_TABLET_DIRECTORY_PREFIX + namer.getNextName();
-          Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" +  lowDirectory);
+          Path lowDirectoryPath = new Path(volume + "/" + tableId + "/" + lowDirectory);
           if (fs.exists(lowDirectoryPath))
             throw new IllegalStateException("Dir exists when it should not " + lowDirectoryPath);
           if (fs.mkdirs(lowDirectoryPath)) {
@@ -65,13 +65,13 @@ public class TabletOperations {
       } catch (IOException e) {
         log.warn(e);
       }
-      
+
       log.warn("Failed to create dir for tablet in table " + tableId + " in volume " + volume + " + will retry ...");
       UtilWaitThread.sleep(3000);
-      
+
     }
   }
-  
+
   public static String createTabletDirectory(String tableDir, Text endRow) {
     while (true) {
       try {
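
createTabletDirectory above picks a volume with fs.choose, builds the tablet directory under Constants.HDFS_TABLES_DIR, and retries every three seconds until the directory is created. A minimal sketch of calling it; it assumes a server-side context where VolumeManagerImpl.get() can load the local configuration, and the behavior of the null-endRow branch is inferred since it is partly hidden in this hunk.

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.accumulo.server.util.TabletOperations;
import org.apache.hadoop.io.Text;

public class TabletDirExample {
  static void createDirs(String tableId) throws Exception {
    VolumeManager fs = VolumeManagerImpl.get(); // needs server-side configuration

    // A null end row takes the branch hidden in the hunk above (assumed to be
    // the table's default tablet directory); a non-null end row gets a freshly
    // generated directory name from UniqueNameAllocator.
    String defaultDir = TabletOperations.createTabletDirectory(fs, tableId, null);
    String splitDir = TabletOperations.createTabletDirectory(fs, tableId, new Text("m"));

    System.out.println(defaultDir + " " + splitDir);
  }
}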

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
index 307bb0c..01bb926 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletServerLocks.java
@@ -33,34 +33,35 @@ import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import com.beust.jcommander.Parameter;
 
 public class TabletServerLocks {
-  
+
   static class Opts extends Help {
-    @Parameter(names="-list")
+    @Parameter(names = "-list")
     boolean list = false;
-    @Parameter(names="-delete")
+    @Parameter(names = "-delete")
     String delete = null;
   }
+
   public static void main(String[] args) throws Exception {
-    
+
     Instance instance = HdfsZooInstance.getInstance();
     String tserverPath = ZooUtil.getRoot(instance) + Constants.ZTSERVERS;
     Opts opts = new Opts();
     opts.parseArgs(TabletServerLocks.class.getName(), args);
-    
+
     ZooCache cache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
-    
+
     if (opts.list) {
       IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-      
+
       List<String> tabletServers = zoo.getChildren(tserverPath);
-      
+
       for (String tabletServer : tabletServers) {
         byte[] lockData = ZooLock.getLockData(cache, tserverPath + "/" + tabletServer, null);
         String holder = null;
         if (lockData != null) {
           holder = new String(lockData, UTF_8);
         }
-        
+
         System.out.printf("%32s %16s%n", tabletServer, holder);
       }
     } else if (opts.delete != null) {
@@ -68,7 +69,7 @@ public class TabletServerLocks {
     } else {
       System.out.println("Usage : " + TabletServerLocks.class.getName() + " -list|-delete <tserver lock>");
     }
-    
+
   }
-  
+
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
index 8e6b339..98de6be 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
@@ -61,36 +61,36 @@ import com.google.common.net.HostAndPort;
 
 public class VerifyTabletAssignments {
   private static final Logger log = Logger.getLogger(VerifyTabletAssignments.class);
-  
+
   static class Opts extends ClientOpts {
     @Parameter(names = {"-v", "--verbose"}, description = "verbose mode (prints locations of tablets)")
     boolean verbose = false;
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(VerifyTabletAssignments.class.getName(), args);
-    
+
     ClientContext context = new ClientContext(opts.getInstance(), new Credentials(opts.principal, opts.getToken()), opts.getClientConfiguration());
     Connector conn = opts.getConnector();
     for (String table : conn.tableOperations().list())
       checkTable(context, opts, table, null);
-    
+
   }
-  
+
   private static void checkTable(final ClientContext context, final Opts opts, String tableName, HashSet<KeyExtent> check) throws AccumuloException,
       AccumuloSecurityException, TableNotFoundException, InterruptedException {
-    
+
     if (check == null)
       System.out.println("Checking table " + tableName);
     else
       System.out.println("Checking table " + tableName + " again, failures " + check.size());
-    
+
     TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
-    
+
     String tableId = Tables.getNameToIdMap(context.getInstance()).get(tableName);
     MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
-    
+
     final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
 
     Map<HostAndPort,List<KeyExtent>> extentsPerServer = new TreeMap<HostAndPort,List<KeyExtent>>();
@@ -102,7 +102,7 @@ public class VerifyTabletAssignments {
         System.out.println(" Tablet " + keyExtent + " has no location");
       else if (opts.verbose)
         System.out.println(" Tablet " + keyExtent + " is located at " + loc);
-      
+
       if (loc != null) {
         final HostAndPort parsedLoc = HostAndPort.fromString(loc);
         List<KeyExtent> extentList = extentsPerServer.get(parsedLoc);
@@ -110,35 +110,35 @@ public class VerifyTabletAssignments {
           extentList = new ArrayList<KeyExtent>();
           extentsPerServer.put(parsedLoc, extentList);
         }
-        
+
         if (check == null || check.contains(keyExtent))
           extentList.add(keyExtent);
       }
     }
-    
+
     ExecutorService tp = Executors.newFixedThreadPool(20);
     for (final Entry<HostAndPort,List<KeyExtent>> entry : extentsPerServer.entrySet()) {
       Runnable r = new Runnable() {
-        
+
         @Override
         public void run() {
           try {
             checkTabletServer(context, entry, failures);
           } catch (Exception e) {
-            log.error("Failure on tablet server '"+entry.getKey()+".", e);
+            log.error("Failure on tablet server '" + entry.getKey() + "'.", e);
             failures.addAll(entry.getValue());
           }
         }
-        
+
       };
-      
+
       tp.execute(r);
     }
-    
+
     tp.shutdown();
-    
+
     while (!tp.awaitTermination(1, TimeUnit.HOURS)) {}
-    
+
     if (failures.size() > 0)
       checkTable(context, opts, tableName, failures);
   }
@@ -154,32 +154,32 @@ public class VerifyTabletAssignments {
   private static void checkTabletServer(ClientContext context, Entry<HostAndPort,List<KeyExtent>> entry, HashSet<KeyExtent> failures)
       throws ThriftSecurityException, TException, NoSuchScanIDException {
     TabletClientService.Iface client = ThriftUtil.getTServerClient(entry.getKey(), context);
-    
+
     Map<TKeyExtent,List<TRange>> batch = new TreeMap<TKeyExtent,List<TRange>>();
-    
+
     for (KeyExtent keyExtent : entry.getValue()) {
       Text row = keyExtent.getEndRow();
       Text row2 = null;
-      
+
       if (row == null) {
         row = keyExtent.getPrevEndRow();
-        
+
         if (row != null) {
           row = new Text(row);
           row.append(new byte[] {'a'}, 0, 1);
         } else {
           row = new Text("1234567890");
         }
-        
+
         row2 = new Text(row);
         row2.append(new byte[] {'!'}, 0, 1);
       } else {
         row = new Text(row);
         row2 = new Text(row);
-        
+
         row.getBytes()[row.getLength() - 1] = (byte) (row.getBytes()[row.getLength() - 1] - 1);
       }
-      
+
       Range r = new Range(row, true, row2, false);
       batch.put(keyExtent.toThrift(), Collections.singletonList(r.toThrift()));
     }
@@ -192,15 +192,15 @@ public class VerifyTabletAssignments {
     if (is.result.more) {
       MultiScanResult result = client.continueMultiScan(tinfo, is.scanID);
       checkFailures(entry.getKey(), failures, result);
-      
+
       while (result.more) {
         result = client.continueMultiScan(tinfo, is.scanID);
         checkFailures(entry.getKey(), failures, result);
       }
     }
-    
+
     client.closeMultiScan(tinfo, is.scanID);
-    
+
     ThriftUtil.returnClient((TServiceClient) client);
   }
 }
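
checkTable above fans each server's extents out to a fixed 20-thread pool, collects failed extents into a shared set, and calls itself again with only the failures. The same pattern, stripped of the thrift calls so it stands alone; every name here is illustrative rather than part of Accumulo.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FanOutRetry {

  interface Check {
    void check(String server, List<String> work) throws Exception;
  }

  // Returns the items that failed; the caller re-resolves their locations and
  // calls this again, much as checkTable() does with its failures set.
  static Set<String> checkAll(Map<String,List<String>> workByServer, final Check check)
      throws InterruptedException {
    // A concurrent set avoids races on the shared failure collection.
    final Set<String> failures = Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());

    ExecutorService tp = Executors.newFixedThreadPool(20);
    for (final Map.Entry<String,List<String>> entry : workByServer.entrySet()) {
      tp.execute(new Runnable() {
        @Override
        public void run() {
          try {
            check.check(entry.getKey(), entry.getValue());
          } catch (Exception e) {
            // Treat everything this server owned as failed, like the original.
            failures.addAll(entry.getValue());
          }
        }
      });
    }

    tp.shutdown();
    while (!tp.awaitTermination(1, TimeUnit.HOURS)) {}

    return failures;
  }
}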

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index efcefde..0edcf71 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -27,16 +27,16 @@ import org.apache.hadoop.fs.Path;
 import com.beust.jcommander.Parameter;
 
 public class ZooKeeperMain {
-  
+
   static class Opts extends Help {
-    
+
     @Parameter(names = {"-z", "--keepers"}, description = "Comma separated list of zookeeper hosts (host:port,host:port)")
     String servers = null;
-    
+
     @Parameter(names = {"-t", "--timeout"}, description = "timeout, in seconds to timeout the zookeeper connection")
     long timeout = 30;
   }
-  
+
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(ZooKeeperMain.class.getName(), args);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java b/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
index 7fdbf13..ef182f1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ZooZap.java
@@ -20,67 +20,66 @@ import java.util.List;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.log4j.Logger;
 
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
-import org.apache.log4j.Logger;
 
 public class ZooZap {
   private static final Logger log = Logger.getLogger(ZooZap.class);
-  
+
   static boolean verbose = false;
-  
+
   private static void message(String msg) {
     if (verbose)
       System.out.println(msg);
   }
-  
+
   static class Opts extends ClientOpts {
-    @Parameter(names="-master", description="remove master locks")
+    @Parameter(names = "-master", description = "remove master locks")
     boolean zapMaster = false;
-    @Parameter(names="-tservers", description="remove tablet server locks")
+    @Parameter(names = "-tservers", description = "remove tablet server locks")
     boolean zapTservers = false;
-    @Parameter(names="-tracers", description="remove tracer locks")
+    @Parameter(names = "-tracers", description = "remove tracer locks")
     boolean zapTracers = false;
-    @Parameter(names="-verbose", description="print out messages about progress")
+    @Parameter(names = "-verbose", description = "print out messages about progress")
     boolean verbose = false;
 
     String getTraceZKPath() {
       return super.getClientConfiguration().get(ClientProperty.TRACE_ZK_PATH);
     }
   }
-  
+
   public static void main(String[] args) {
     Opts opts = new Opts();
     opts.parseArgs(ZooZap.class.getName(), args);
-    
-    if (!opts.zapMaster && !opts.zapTservers && !opts.zapTracers)
-    {
-        new JCommander(opts).usage();
-        return;
+
+    if (!opts.zapMaster && !opts.zapTservers && !opts.zapTracers) {
+      new JCommander(opts).usage();
+      return;
     }
-    
+
     String iid = opts.getInstance().getInstanceID();
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    
+
     if (opts.zapMaster) {
       String masterLockPath = Constants.ZROOT + "/" + iid + Constants.ZMASTER_LOCK;
-      
+
       zapDirectory(zoo, masterLockPath);
     }
-    
+
     if (opts.zapTservers) {
       String tserversPath = Constants.ZROOT + "/" + iid + Constants.ZTSERVERS;
       try {
         List<String> children = zoo.getChildren(tserversPath);
         for (String child : children) {
           message("Deleting " + tserversPath + "/" + child + " from zookeeper");
-          
+
           if (opts.zapMaster)
             ZooReaderWriter.getInstance().recursiveDelete(tserversPath + "/" + child, NodeMissingPolicy.SKIP);
           else {
@@ -96,14 +95,14 @@ public class ZooZap {
         log.error(e);
       }
     }
-    
+
     if (opts.zapTracers) {
       String path = opts.getTraceZKPath();
       zapDirectory(zoo, path);
     }
-    
+
   }
-  
+
   private static void zapDirectory(IZooReaderWriter zoo, String path) {
     try {
       List<String> children = zoo.getChildren(path);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/time/BaseRelativeTime.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/time/BaseRelativeTime.java b/server/base/src/main/java/org/apache/accumulo/server/util/time/BaseRelativeTime.java
index 393c6d2..73afc7e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/time/BaseRelativeTime.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/time/BaseRelativeTime.java
@@ -18,25 +18,25 @@ package org.apache.accumulo.server.util.time;
 
 /**
  * Provide time from a local source and a hint from a time source.
- * 
+ *
  * RelativeTime and BaseRelativeTime are separated to allow unit testing of the core functionality of relative timekeeping.
- * 
+ *
  */
 public class BaseRelativeTime implements ProvidesTime {
-  
+
   private long diff = 0;
   private long lastReportedTime = 0;
   ProvidesTime local;
-  
+
   BaseRelativeTime(ProvidesTime real, long lastReportedTime) {
     this.local = real;
     this.lastReportedTime = lastReportedTime;
   }
-  
+
   BaseRelativeTime(ProvidesTime real) {
     this(real, 0);
   }
-  
+
   @Override
   synchronized public long currentTime() {
     long localNow = local.currentTime();
@@ -46,12 +46,12 @@ public class BaseRelativeTime implements ProvidesTime {
     lastReportedTime = result;
     return result;
   }
-  
+
   synchronized public void updateTime(long advice) {
     long localNow = local.currentTime();
     long diff = advice - localNow;
     // smooth in 20% of the change, not the whole thing.
     this.diff = (this.diff * 4 / 5) + diff / 5;
   }
-  
+
 }
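
updateTime above folds 20% of each newly observed clock offset into the stored offset (this.diff = this.diff * 4 / 5 + diff / 5), and currentTime never reports a value earlier than the last one it returned. A self-contained illustration of the smoothing arithmetic, holding the observed offset constant at 1000 ms; in the real class the blended quantity is advice - localNow, recomputed on every call.

public class SmoothingExample {
  public static void main(String[] args) {
    long diff = 0;      // stored offset, as in BaseRelativeTime
    long advice = 1000; // pretend every hint says we are 1000 ms behind

    for (int i = 1; i <= 4; i++) {
      // Same 20% blend as updateTime(); the real class uses (advice - localNow) / 5.
      diff = (diff * 4 / 5) + advice / 5;
      System.out.println("after update " + i + ": offset = " + diff + " ms");
    }
    // Prints 200, 360, 488, 590 - the offset converges toward 1000 ms
    // instead of jumping there in one step.
  }
}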

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/time/ProvidesTime.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/time/ProvidesTime.java b/server/base/src/main/java/org/apache/accumulo/server/util/time/ProvidesTime.java
index 117fa5f..1042c32 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/time/ProvidesTime.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/time/ProvidesTime.java
@@ -18,7 +18,7 @@ package org.apache.accumulo.server.util.time;
 
 /**
  * An interface for anything that returns the time in the same format as System.currentTimeMillis().
- * 
+ *
  */
 public interface ProvidesTime {
   long currentTime();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/main/java/org/apache/accumulo/server/util/time/RelativeTime.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/time/RelativeTime.java b/server/base/src/main/java/org/apache/accumulo/server/util/time/RelativeTime.java
index 99581e9..bc48b10 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/time/RelativeTime.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/time/RelativeTime.java
@@ -18,27 +18,27 @@ package org.apache.accumulo.server.util.time;
 
 /**
  * Provide time from System time and hints from another time source.
- * 
+ *
  * Provides a convenient static replacement for System.currentTimeMillis()
  */
 public class RelativeTime extends BaseRelativeTime {
-  
+
   private RelativeTime() {
     super(new SystemTime());
   }
-  
+
   private static BaseRelativeTime instance = new RelativeTime();
-  
+
   public static BaseRelativeTime getInstance() {
     return instance;
   }
-  
+
   public static void setInstance(BaseRelativeTime newInstance) {
     instance = newInstance;
   }
-  
+
   public static long currentTimeMillis() {
     return getInstance().currentTime();
   }
-  
+
 }
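
RelativeTime pins a single BaseRelativeTime over the system clock and exposes it through the static currentTimeMillis(); setInstance exists so another time source can be swapped in, for example by tests or by servers syncing to a master's hints. A sketch of that swap; it must sit in the same org.apache.accumulo.server.util.time package because the BaseRelativeTime constructors above are package-private, and FixedTime is an illustrative helper, not an Accumulo class.

package org.apache.accumulo.server.util.time;

public class RelativeTimeExample {

  // Illustrative fixed time source; ProvidesTime has a single currentTime() method.
  static class FixedTime implements ProvidesTime {
    private final long time;

    FixedTime(long time) {
      this.time = time;
    }

    @Override
    public long currentTime() {
      return time;
    }
  }

  public static void main(String[] args) {
    // Swap the global instance so RelativeTime.currentTimeMillis() is deterministic.
    // This class lives in this package because the BaseRelativeTime constructors
    // are package-private.
    RelativeTime.setInstance(new BaseRelativeTime(new FixedTime(42L)));
    System.out.println(RelativeTime.currentTimeMillis()); // prints 42
  }
}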