Posted to notifications@accumulo.apache.org by GitBox <gi...@apache.org> on 2018/05/08 21:18:18 UTC

[GitHub] mikewalch closed pull request #478: Removed unnecessary calls to toString()

mikewalch closed pull request #478: Removed unnecessary calls to toString()
URL: https://github.com/apache/accumulo/pull/478
 
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

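For readers skimming the diff, the change is purely mechanical: string concatenation and SLF4J's {} placeholders already convert their arguments to strings, so an explicit toString() adds nothing (and, unlike the implicit String.valueOf conversion, throws a NullPointerException when the reference is null). Below is a minimal sketch of the pattern, not part of the PR; it assumes only SLF4J on the classpath, and the class and variable names are illustrative.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RedundantToStringExample {
      private static final Logger log =
          LoggerFactory.getLogger(RedundantToStringExample.class);

      public static void main(String[] args) {
        Object split = null; // stand-in for a RangeInputSplit, Path, KeyExtent, etc.

        // Concatenation converts operands with String.valueOf(), so the explicit
        // call is redundant -- and valueOf() also tolerates null:
        String concise = "Initializing input split: " + split; // -> "... null"
        // String verbose = "Initializing input split: " + split.toString(); // NPE here

        // SLF4J's {} placeholder likewise formats the argument via its toString():
        log.debug("Initializing input split: {}", split);

        System.out.println(concise);
      }
    }

The same reasoning covers the other hunks below: StringBuilder.append(Object), path and key strings built by concatenation, and parameterized log calls all perform the string conversion themselves.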

diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
index 36792a7c5a..49bab9a037 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AbstractInputFormat.java
@@ -461,7 +461,7 @@ private void setupIterators(JobConf job, ScannerBase scanner, String tableName,
      */
     public void initialize(InputSplit inSplit, JobConf job) throws IOException {
       baseSplit = (org.apache.accumulo.core.client.mapreduce.RangeInputSplit) inSplit;
-      log.debug("Initializing input split: " + baseSplit.toString());
+      log.debug("Initializing input split: " + baseSplit);
 
       Instance instance = baseSplit.getInstance(getClientConfiguration(job));
       if (null == instance) {
@@ -559,8 +559,7 @@ public void initialize(InputSplit inSplit, JobConf job) throws IOException {
         scanner.setRange(baseSplit.getRange());
         scannerBase = scanner;
       } else {
-        throw new IllegalArgumentException(
-            "Can not initialize from " + baseSplit.getClass().toString());
+        throw new IllegalArgumentException("Can not initialize from " + baseSplit.getClass());
       }
 
       Collection<Pair<Text,Text>> columns = baseSplit.getFetchedColumns();
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
index 32a2b61925..3bb86cab96 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloInputFormat.java
@@ -64,7 +64,7 @@
         log.setLevel(level);
       }
     } else {
-      throw new IllegalArgumentException("No RecordReader for " + split.getClass().toString());
+      throw new IllegalArgumentException("No RecordReader for " + split.getClass());
     }
 
     RecordReaderBase<Key,Value> recordReader = new RecordReaderBase<Key,Value>() {
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
index c273892d3b..9fdacc9bfe 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapred/AccumuloOutputFormat.java
@@ -444,7 +444,7 @@ public void write(Text table, Mutation mutation) throws IOException {
         try {
           addTable(table);
         } catch (final Exception e) {
-          log.error("Could not add table '" + table.toString() + "'", e);
+          log.error("Could not add table '" + table + "'", e);
           throw new IOException(e);
         }
 
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
index b7adbbb2e4..6b9679a7c9 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AbstractInputFormat.java
@@ -468,7 +468,7 @@ private void setupIterators(TaskAttemptContext context, ScannerBase scanner, Str
     public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
 
       split = (RangeInputSplit) inSplit;
-      log.debug("Initializing input split: " + split.toString());
+      log.debug("Initializing input split: " + split);
 
       Instance instance = split.getInstance(getClientConfiguration(attempt));
       if (null == instance) {
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
index 1dac80a2d6..efc6221a23 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/AccumuloInputFormat.java
@@ -64,7 +64,7 @@
         log.setLevel(level);
       }
     } else {
-      throw new IllegalArgumentException("No RecordReader for " + split.getClass().toString());
+      throw new IllegalArgumentException("No RecordReader for " + split.getClass());
     }
 
     return new RecordReaderBase<Key,Value>() {
diff --git a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
index 9223b5ab3c..441189e4c7 100644
--- a/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
+++ b/client/mapreduce/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/impl/ConfiguratorBase.java
@@ -155,8 +155,8 @@ public static void setConnectorInfo(Class<?> implementingClass, Configuration co
       // Avoid serializing the DelegationToken secret in the configuration -- the Job will do that
       // work for us securely
       DelegationTokenImpl delToken = (DelegationTokenImpl) token;
-      conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.JOB.prefix()
-          + token.getClass().getName() + ":" + delToken.getServiceName().toString());
+      conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
+          TokenSource.JOB.prefix() + token.getClass().getName() + ":" + delToken.getServiceName());
     } else {
       conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
           TokenSource.INLINE.prefix() + token.getClass().getName() + ":"
diff --git a/core/src/main/java/org/apache/accumulo/core/client/TimedOutException.java b/core/src/main/java/org/apache/accumulo/core/client/TimedOutException.java
index ff8fab385e..3ce2d00f06 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/TimedOutException.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/TimedOutException.java
@@ -34,8 +34,7 @@ private static String shorten(Set<String> set) {
       return set.toString();
     }
 
-    return new ArrayList<>(set).subList(0, 10).toString() + " ... " + (set.size() - 10)
-        + " servers not shown";
+    return new ArrayList<>(set).subList(0, 10) + " ... " + (set.size() - 10) + " servers not shown";
   }
 
   public TimedOutException(Set<String> timedoutServers) {
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Bulk.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Bulk.java
index 935834f1f7..4f09689246 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Bulk.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Bulk.java
@@ -90,7 +90,7 @@ public Text getPrevEndRow() {
 
     @Override
     public String toString() {
-      return getEndRow().toString() + ";" + getPrevEndRow().toString();
+      return getEndRow() + ";" + getPrevEndRow();
     }
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsHelper.java b/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsHelper.java
index cf74cd91d1..1dd4b1da2d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/NamespaceOperationsHelper.java
@@ -187,7 +187,7 @@ public int addConstraint(String namespace, String constraintClassName)
           i = Integer.parseInt(
               property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
         } catch (NumberFormatException e) {
-          throw new AccumuloException("Bad key for existing constraint: " + property.toString());
+          throw new AccumuloException("Bad key for existing constraint: " + property);
         }
         constraintNumbers.add(i);
         constraintClasses.put(property.getValue(), i);
@@ -225,7 +225,7 @@ public void removeConstraint(String namespace, int number)
           constraints.put(property.getValue(), Integer.parseInt(
               property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
         } catch (NumberFormatException e) {
-          throw new AccumuloException("Bad key for existing constraint: " + property.toString());
+          throw new AccumuloException("Bad key for existing constraint: " + property);
         }
       }
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsHelper.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsHelper.java
index 755cb095b2..c84139b83e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TableOperationsHelper.java
@@ -186,7 +186,7 @@ public int addConstraint(String tableName, String constraintClassName)
           i = Integer.parseInt(
               property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length()));
         } catch (NumberFormatException e) {
-          throw new AccumuloException("Bad key for existing constraint: " + property.toString());
+          throw new AccumuloException("Bad key for existing constraint: " + property);
         }
         constraintNumbers.add(i);
         constraintClasses.put(property.getValue(), i);
@@ -223,7 +223,7 @@ public void removeConstraint(String tableName, int number)
           constraints.put(property.getValue(), Integer.parseInt(
               property.getKey().substring(Property.TABLE_CONSTRAINT_PREFIX.toString().length())));
         } catch (NumberFormatException e) {
-          throw new AccumuloException("Bad key for existing constraint: " + property.toString());
+          throw new AccumuloException("Bad key for existing constraint: " + property);
         }
       }
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
index b9d2730f69..e4567523c2 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
@@ -197,7 +197,7 @@ public boolean equals(Object o) {
     @Override
     public int hashCode() {
       throw new UnsupportedOperationException(
-          "hashcode is not implemented for class " + this.getClass().toString());
+          "hashcode is not implemented for class " + this.getClass());
     }
 
     @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportKey.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportKey.java
index 82aceee7b0..49b30851ef 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportKey.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftTransportKey.java
@@ -105,7 +105,7 @@ public String toString() {
     if (isSsl()) {
       prefix = "ssl:";
     } else if (isSasl()) {
-      prefix = saslParams.toString() + ":";
+      prefix = saslParams + ":";
     }
     return prefix + server + " (" + Long.toString(timeout) + ")";
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
index 2943e85e03..a403add486 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/MultiLevelIndex.java
@@ -897,7 +897,7 @@ private void printIndex(IndexBlock ib, String prefix, PrintStream out) throws IO
         sb.append(" RawSize : ");
         sb.append(ie.rawSize);
 
-        out.println(sb.toString());
+        out.println(sb);
 
         if (ib.getLevel() > 0) {
           IndexBlock cib = getIndexBlock(ie);
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index 7e424c8175..53a43c7e19 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -160,8 +160,8 @@ public void execute(final String[] args) throws Exception {
             "Attempting to find file across filesystems. Consider providing URI instead of path");
         fs = hadoopFs.exists(path) ? hadoopFs : localFs; // fall back to local
       }
-      System.out.println(
-          "Reading file: " + path.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());
+      System.out
+          .println("Reading file: " + path.makeQualified(fs.getUri(), fs.getWorkingDirectory()));
 
       CachableBlockFile.Reader _rdr = new CachableBlockFile.Reader(fs, path, conf, null, null,
           SiteConfiguration.getInstance());
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
index ada3936ccc..1a37b7c1a3 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/IteratorUtil.java
@@ -119,8 +119,7 @@ static Property getProperty(IteratorScope scope) {
       }
     }
 
-    props.put(Property.TABLE_CONSTRAINT_PREFIX.toString() + "1",
-        DefaultKeySizeConstraint.class.getName());
+    props.put(Property.TABLE_CONSTRAINT_PREFIX + "1", DefaultKeySizeConstraint.class.getName());
 
     return props;
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
index b4a708a414..9257182ecf 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/IndexedDocIterator.java
@@ -98,22 +98,22 @@ public static Text parseDocID(Key key) {
     Text colq = key.getColumnQualifier();
     int firstZeroIndex = colq.find("\0");
     if (firstZeroIndex < 0) {
-      throw new IllegalArgumentException("bad docid: " + key.toString());
+      throw new IllegalArgumentException("bad docid: " + key);
     }
     int secondZeroIndex = colq.find("\0", firstZeroIndex + 1);
     if (secondZeroIndex < 0) {
-      throw new IllegalArgumentException("bad docid: " + key.toString());
+      throw new IllegalArgumentException("bad docid: " + key);
     }
     int thirdZeroIndex = colq.find("\0", secondZeroIndex + 1);
     if (thirdZeroIndex < 0) {
-      throw new IllegalArgumentException("bad docid: " + key.toString());
+      throw new IllegalArgumentException("bad docid: " + key);
     }
     Text docID = new Text();
     try {
       docID.set(colq.getBytes(), firstZeroIndex + 1, thirdZeroIndex - 1 - firstZeroIndex);
     } catch (ArrayIndexOutOfBoundsException e) {
-      throw new IllegalArgumentException("bad indices for docid: " + key.toString() + " "
-          + firstZeroIndex + " " + secondZeroIndex + " " + thirdZeroIndex);
+      throw new IllegalArgumentException("bad indices for docid: " + key + " " + firstZeroIndex
+          + " " + secondZeroIndex + " " + thirdZeroIndex);
     }
     return docID;
   }
@@ -167,14 +167,14 @@ protected void advanceToIntersection() throws IOException {
     if (topKey == null)
       return;
     if (log.isTraceEnabled())
-      log.trace("using top key to seek for doc: {}", topKey.toString());
+      log.trace("using top key to seek for doc: {}", topKey);
     Key docKey = buildDocKey();
     docSource.seek(new Range(docKey, true, null, false), docColfSet, true);
-    log.debug("got doc key: {}", docSource.getTopKey().toString());
+    log.debug("got doc key: {}", docSource.getTopKey());
     if (docSource.hasTop() && docKey.equals(docSource.getTopKey(), PartialKey.ROW_COLFAM_COLQUAL)) {
       value = docSource.getTopValue();
     }
-    log.debug("got doc value: {}", value.toString());
+    log.debug("got doc value: {}", value);
   }
 
   protected Key buildDocKey() {
@@ -193,7 +193,7 @@ protected Key buildDocKey() {
     colq.set(currentDocID.getBytes(), zeroIndex + 1, currentDocID.getLength() - zeroIndex - 1);
     Key k = new Key(currentPartition, colf, colq);
     if (log.isTraceEnabled())
-      log.trace("built doc key for seek: {}", k.toString());
+      log.trace("built doc key for seek: {}", k);
     return k;
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
index 0abf815496..79f4b6a45b 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/user/RowEncodingIterator.java
@@ -119,8 +119,8 @@ private void prepKeys() throws IOException {
         values.add(new Value(sourceTopValue));
         kvBufSize += sourceTopKey.getSize() + sourceTopValue.getSize() + 128;
         if (kvBufSize > maxBufferSize) {
-          throw new BufferOverflowException("Exceeded buffer size of " + maxBufferSize
-              + " for row: " + sourceTopKey.getRow().toString());
+          throw new BufferOverflowException(
+              "Exceeded buffer size of " + maxBufferSize + " for row: " + sourceTopKey.getRow());
         }
         sourceIter.next();
       }
diff --git a/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java b/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
index ab70bb0db0..3e4f0cc022 100644
--- a/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
+++ b/core/src/main/java/org/apache/accumulo/core/tabletserver/log/LogEntry.java
@@ -51,7 +51,7 @@ public LogEntry(KeyExtent extent, long timestamp, String server, String filename
 
   @Override
   public String toString() {
-    return extent.toString() + " " + filename;
+    return extent + " " + filename;
   }
 
   public String getName() {
diff --git a/core/src/main/java/org/apache/accumulo/core/util/format/HexFormatter.java b/core/src/main/java/org/apache/accumulo/core/util/format/HexFormatter.java
index 4f11ce5538..8220193022 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/format/HexFormatter.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/format/HexFormatter.java
@@ -87,7 +87,7 @@ public String next() {
     sb.append("  ");
     toHex(sb, entry.getKey().getColumnQualifierData().toArray());
     sb.append(" [");
-    sb.append(entry.getKey().getColumnVisibilityData().toString());
+    sb.append(entry.getKey().getColumnVisibilityData());
     sb.append("] ");
     if (config.willPrintTimestamps()) {
       sb.append(Long.toString(entry.getKey().getTimestamp()));
diff --git a/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java b/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
index 2d0b4a6dec..3f319c4dd3 100644
--- a/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
+++ b/core/src/main/java/org/apache/accumulo/core/volume/NonConfiguredVolume.java
@@ -75,7 +75,7 @@ public boolean equals(Object o) {
 
   @Override
   public String toString() {
-    return "NonConfiguredVolume: " + this.fs.toString();
+    return "NonConfiguredVolume: " + this.fs;
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java b/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
index 2af020f4f8..8558438e8f 100644
--- a/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/volume/VolumeConfiguration.java
@@ -73,7 +73,7 @@ public static String getConfiguredBaseDir(AccumuloConfiguration conf,
 
     if (dfsUri == null || dfsUri.isEmpty()) {
       try {
-        baseDir = FileSystem.get(hadoopConfig).getUri().toString() + singleNamespace;
+        baseDir = FileSystem.get(hadoopConfig).getUri() + singleNamespace;
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
diff --git a/core/src/main/java/org/apache/accumulo/fate/ZooStore.java b/core/src/main/java/org/apache/accumulo/fate/ZooStore.java
index cd560e5dac..f662d3dd41 100644
--- a/core/src/main/java/org/apache/accumulo/fate/ZooStore.java
+++ b/core/src/main/java/org/apache/accumulo/fate/ZooStore.java
@@ -281,7 +281,7 @@ private void verifyReserved(long tid) {
         byte[] ser = zk.getData(txpath + "/" + top, null);
         return (Repo<T>) deserialize(ser);
       } catch (KeeperException.NoNodeException ex) {
-        log.debug("zookeeper error reading " + txpath + ": " + ex.toString(), ex);
+        log.debug("zookeeper error reading " + txpath + ": " + ex, ex);
         sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
         continue;
       } catch (Exception e) {
diff --git a/core/src/test/java/org/apache/accumulo/core/client/summary/CountingSummarizerTest.java b/core/src/test/java/org/apache/accumulo/core/client/summary/CountingSummarizerTest.java
index df0b3cb13c..d7aff9c048 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/summary/CountingSummarizerTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/summary/CountingSummarizerTest.java
@@ -48,9 +48,9 @@
     @Override
     protected Converter<String> converter() {
       return (k, v, c) -> {
-        c.accept("rp:" + k.getRowData().subSequence(0, 2).toString());
-        c.accept("fp:" + k.getColumnFamilyData().subSequence(0, 2).toString());
-        c.accept("qp:" + k.getColumnQualifierData().subSequence(0, 2).toString());
+        c.accept("rp:" + k.getRowData().subSequence(0, 2));
+        c.accept("fp:" + k.getColumnFamilyData().subSequence(0, 2));
+        c.accept("qp:" + k.getColumnQualifierData().subSequence(0, 2));
       };
     }
   }
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
index 157e3b69af..ee7b02b7d5 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
@@ -120,7 +120,7 @@ public TestRFile(AccumuloConfiguration accumuloConfiguration) {
     public void close() throws IOException {
       if (rfile != null) {
         FileSystem fs = FileSystem.newInstance(conf);
-        Path path = new Path("file://" + rfile.toString());
+        Path path = new Path("file://" + rfile);
         fs.delete(path, false);
       }
     }
@@ -145,7 +145,7 @@ public void openWriter(boolean startDLG) throws IOException {
         rfile = File.createTempFile("TestRFile", ".rf");
       }
       FileSystem fs = FileSystem.newInstance(conf);
-      Path path = new Path("file://" + rfile.toString());
+      Path path = new Path("file://" + rfile);
       dos = fs.create(path, true);
       CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(PositionedOutputs.wrap(dos),
           "gz", conf, accumuloConfiguration);
@@ -177,7 +177,7 @@ public void closeWriter() throws IOException {
 
     public void openReader() throws IOException {
       FileSystem fs = FileSystem.newInstance(conf);
-      Path path = new Path("file://" + rfile.toString());
+      Path path = new Path("file://" + rfile);
 
       // the caches used to obfuscate the multithreaded issues
       CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(fs, path, conf, null, null,
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
index 32890e0def..665504ec72 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/impl/MiniAccumuloClusterImpl.java
@@ -329,7 +329,7 @@ private Process _exec(Class<?> clazz, List<String> extraJvmOpts, String... args)
 
     log.info("Starting MiniAccumuloCluster process with class: " + clazz.getSimpleName()
         + "\n, jvmOpts: " + extraJvmOpts + "\n, classpath: " + classpath + "\n, args: " + argList
-        + "\n, environment: " + builder.environment().toString());
+        + "\n, environment: " + builder.environment());
     Process process = builder.start();
 
     LogWriter lw;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/deprecated/BulkImporter.java b/server/base/src/main/java/org/apache/accumulo/server/client/deprecated/BulkImporter.java
index 37cb82fd23..4d3358a5a7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/deprecated/BulkImporter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/deprecated/BulkImporter.java
@@ -154,7 +154,7 @@ public void run() {
             try {
               tabletsToAssignMapFileTo = findOverlappingTablets(context, fs, locator, mapFile);
             } catch (Exception ex) {
-              log.warn("Unable to find tablets that overlap file " + mapFile.toString(), ex);
+              log.warn("Unable to find tablets that overlap file " + mapFile, ex);
             }
             log.debug("Map file {} found to overlap {} tablets", mapFile,
                 tabletsToAssignMapFileTo.size());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java b/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
index 22e9ee1492..15eb5a29af 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/log/WalStateManager.java
@@ -120,7 +120,7 @@ public void initWalMarker(TServerInstance tsi) throws WalMarkerException {
     byte[] data = new byte[0];
 
     try {
-      zoo.putPersistentData(root() + "/" + tsi.toString(), data, NodeExistsPolicy.FAIL);
+      zoo.putPersistentData(root() + "/" + tsi, data, NodeExistsPolicy.FAIL);
     } catch (KeeperException | InterruptedException e) {
       throw new WalMarkerException(e);
     }
@@ -133,14 +133,14 @@ public void addNewWalMarker(TServerInstance tsi, Path path) throws WalMarkerExce
 
   private void updateState(TServerInstance tsi, Path path, WalState state)
       throws WalMarkerException {
-    byte[] data = (state.toString() + "," + path.toString()).getBytes(UTF_8);
+    byte[] data = (state + "," + path).getBytes(UTF_8);
     try {
       NodeExistsPolicy policy = NodeExistsPolicy.OVERWRITE;
       if (state == WalState.OPEN) {
         policy = NodeExistsPolicy.FAIL;
       }
       log.debug("Setting {} to {}", path.getName(), state);
-      zoo.putPersistentData(root() + "/" + tsi.toString() + "/" + path.getName(), data, policy);
+      zoo.putPersistentData(root() + "/" + tsi + "/" + path.getName(), data, policy);
     } catch (KeeperException | InterruptedException e) {
       throw new WalMarkerException(e);
     }
@@ -160,7 +160,7 @@ public void walUnreferenced(TServerInstance tsi, Path path) throws WalMarkerExce
   public List<Path> getWalsInUse(TServerInstance tsi) throws WalMarkerException {
     List<Path> result = new ArrayList<>();
     try {
-      String zpath = root() + "/" + tsi.toString();
+      String zpath = root() + "/" + tsi;
       zoo.sync(zpath);
       for (String child : zoo.getChildren(zpath)) {
         Pair<WalState,Path> parts = parse(zoo.getData(zpath + "/" + child, null));
@@ -200,7 +200,7 @@ public void walUnreferenced(TServerInstance tsi, Path path) throws WalMarkerExce
   // garbage collector wants to know the state (open/closed) of a log, and the filename to delete
   public Pair<WalState,Path> state(TServerInstance instance, UUID uuid) throws WalMarkerException {
     try {
-      String path = root() + "/" + instance.toString() + "/" + uuid.toString();
+      String path = root() + "/" + instance + "/" + uuid;
       return parse(zoo.getData(path, null));
     } catch (KeeperException | InterruptedException e) {
       throw new WalMarkerException(e);
@@ -223,7 +223,7 @@ public void walUnreferenced(TServerInstance tsi, Path path) throws WalMarkerExce
   public void removeWalMarker(TServerInstance instance, UUID uuid) throws WalMarkerException {
     try {
       log.debug("Removing {}", uuid);
-      String path = root() + "/" + instance.toString() + "/" + uuid.toString();
+      String path = root() + "/" + instance + "/" + uuid;
       zoo.delete(path, -1);
     } catch (InterruptedException | KeeperException e) {
       throw new WalMarkerException(e);
@@ -232,7 +232,7 @@ public void removeWalMarker(TServerInstance instance, UUID uuid) throws WalMarke
 
   // garbage collector knows the instance is dead, and has no markers
   public void forget(TServerInstance instance) throws WalMarkerException {
-    String path = root() + "/" + instance.toString();
+    String path = root() + "/" + instance;
     try {
       zoo.recursiveDelete(path, NodeMissingPolicy.FAIL);
     } catch (InterruptedException | KeeperException e) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
index 2f0ba55dcc..c208e08b04 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/HadoopLogCloser.java
@@ -53,16 +53,16 @@ public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) thr
       DistributedFileSystem dfs = (DistributedFileSystem) ns;
       try {
         if (!dfs.recoverLease(source)) {
-          log.info("Waiting for file to be closed {}", source.toString());
+          log.info("Waiting for file to be closed {}", source);
           return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
         }
-        log.info("Recovered lease on {}", source.toString());
+        log.info("Recovered lease on {}", source);
       } catch (FileNotFoundException ex) {
         throw ex;
       } catch (Exception ex) {
-        log.warn("Error recovering lease on " + source.toString(), ex);
+        log.warn("Error recovering lease on " + source, ex);
         ns.append(source).close();
-        log.info("Recovered lease on {} using append", source.toString());
+        log.info("Recovered lease on {} using append", source);
       }
     } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
       // ignore
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
index d291597aee..6701b14e17 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
@@ -32,7 +32,7 @@
 
   @Override
   public long close(AccumuloConfiguration conf, VolumeManager fs, Path path) throws IOException {
-    log.info("Recovering file {} by changing permission to readonly", path.toString());
+    log.info("Recovering file {} by changing permission to readonly", path);
     FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
     FsPermission roPerm = new FsPermission((short) 0444);
     try {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/MergeInfo.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/MergeInfo.java
index f46f72868d..0cacd02739 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/MergeInfo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/MergeInfo.java
@@ -102,7 +102,7 @@ public boolean overlaps(KeyExtent otherExtent) {
   @Override
   public String toString() {
     if (!state.equals(MergeState.NONE))
-      return "Merge " + operation.toString() + " of " + extent + " State: " + state;
+      return "Merge " + operation + " of " + extent + " State: " + state;
     return "No Merge in progress";
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
index 1769652dbb..ccdef293ef 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/SuspendingTServer.java
@@ -41,7 +41,7 @@ public static SuspendingTServer fromValue(Value value) {
   }
 
   public Value toValue() {
-    return new Value(server.toString() + "|" + suspensionTime);
+    return new Value(server + "|" + suspensionTime);
   }
 
   @Override
@@ -68,6 +68,6 @@ public int hashCode() {
 
   @Override
   public String toString() {
-    return server.toString() + "[" + suspensionTime + "]";
+    return server + "[" + suspensionTime + "]";
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/monitor/DedupedLogEvent.java b/server/base/src/main/java/org/apache/accumulo/server/monitor/DedupedLogEvent.java
index a1b6ecf7c0..70bf1f7122 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/monitor/DedupedLogEvent.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/monitor/DedupedLogEvent.java
@@ -48,8 +48,8 @@ public void setCount(int count) {
   @Override
   public int hashCode() {
     if (hash == -1) {
-      String eventId = event.getMDC("application").toString() + ":" + event.getLevel().toString()
-          + ":" + event.getMessage().toString();
+      String eventId = event.getMDC("application") + ":" + event.getLevel() + ":"
+          + event.getMessage();
       hash = eventId.hashCode();
     }
     return hash;
@@ -64,7 +64,6 @@ public boolean equals(Object obj) {
 
   @Override
   public String toString() {
-    return event.getMDC("application").toString() + ":" + event.getLevel().toString() + ":"
-        + event.getMessage().toString();
+    return event.getMDC("application") + ":" + event.getLevel() + ":" + event.getMessage();
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemHelper.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemHelper.java
index d5effb174a..e9a65076e7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemHelper.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemHelper.java
@@ -61,8 +61,7 @@ public void recordNewStatus(Path filePath, Status status, ReplicationTarget targ
       throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     try (BatchWriter bw = context.getConnector().createBatchWriter(ReplicationTable.NAME,
         new BatchWriterConfig())) {
-      log.debug("Recording new status for {}, {}", filePath.toString(),
-          ProtobufUtil.toString(status));
+      log.debug("Recording new status for {}, {}", filePath, ProtobufUtil.toString(status));
       Mutation m = new Mutation(filePath.toString());
       WorkSection.add(m, target.toText(), ProtobufUtil.toValue(status));
       bw.addMutation(m);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
index ff5756f139..c8cba68c49 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/TServerUtils.java
@@ -592,8 +592,7 @@ public static ServerAddress startTServer(ThriftServerType serverType, TimedProce
         }
         break;
       } catch (TTransportException e) {
-        log.warn("Error attempting to create server at {}. Error: {}", address.toString(),
-            e.getMessage());
+        log.warn("Error attempting to create server at {}. Error: {}", address, e.getMessage());
       }
     }
     if (null == serverAddress) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java
index 60b7e474ac..b31cf169c6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationTokenKeyManager.java
@@ -156,7 +156,7 @@ void _run(long now) {
       AuthenticationKey newKey = new AuthenticationKey(++idSeq, now, now + tokenMaxLifetime,
           secretManager.generateSecret());
 
-      log.debug("Created new {}", newKey.toString());
+      log.debug("Created new {}", newKey);
 
       // Will set to be the current key given the idSeq
       secretManager.addKey(newKey);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 82cf8461a5..3d38e94361 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -380,8 +380,7 @@ public static void addDeleteEntry(AccumuloServerContext context, Table.ID tableI
   public static Mutation createDeleteMutation(Table.ID tableId, String pathToRemove)
       throws IOException {
     Path path = VolumeManagerImpl.get().getFullPath(tableId, pathToRemove);
-    Mutation delFlag = new Mutation(
-        new Text(MetadataSchema.DeletesSection.getRowPrefix() + path.toString()));
+    Mutation delFlag = new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + path));
     delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
     return delFlag;
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
index b6e717fb1c..9ad2d7a2fa 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ReplicationTableUtil.java
@@ -192,8 +192,7 @@ public static void updateFiles(ClientContext context, KeyExtent extent, String f
 
   static Mutation createUpdateMutation(Path file, Value v, KeyExtent extent) {
     // Need to normalize the file path so we can assuredly find it again later
-    return createUpdateMutation(new Text(ReplicationSection.getRowPrefix() + file.toString()), v,
-        extent);
+    return createUpdateMutation(new Text(ReplicationSection.getRowPrefix() + file), v, extent);
   }
 
   private static Mutation createUpdateMutation(Text row, Value v, KeyExtent extent) {
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
index cbdbbc801a..e7530813d7 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
@@ -114,7 +114,7 @@ public void testCleanupIndexOpWithCommonParentVolume() throws IOException {
     Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
 
     HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
 
     VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
 
@@ -147,7 +147,7 @@ public void testCleanupIndexOpWithCommonParentVolumeWithDepth() throws IOExcepti
     Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
 
     HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
 
     VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
 
@@ -174,7 +174,7 @@ public void testCleanupIndexOpWithoutCommonParentVolume() throws IOException {
     Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
 
     HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
 
     VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
 
@@ -204,7 +204,7 @@ public void testCleanupIndexOpWithoutCommonParentVolumeWithDepth() throws IOExce
     Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
 
     HashMap<Property,String> testProps = new HashMap<>();
-    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+    testProps.put(Property.INSTANCE_VOLUMES, v1.toURI() + "," + v2.toURI());
 
     VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
 
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
index c8b2e4eafd..c663d9a21b 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogs.java
@@ -244,7 +244,7 @@ private long removeTabletServerMarkers(Map<UUID,TServerInstance> uidMap,
         try {
           walMarker.forget(entry.getKey());
         } catch (WalMarkerException ex) {
-          log.info("Error removing znode for " + entry.getKey() + " " + ex.toString());
+          log.info("Error removing znode for " + entry.getKey() + " " + ex);
         }
       }
     }
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
index 614de92ebd..fef1d349b3 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
@@ -110,7 +110,7 @@ public void run() {
       findWalsSpan.stop();
     }
 
-    log.info("Found {} WALs referenced in metadata in {}", closed.size(), sw.toString());
+    log.info("Found {} WALs referenced in metadata in {}", closed.size(), sw);
     sw.reset();
 
     Span updateReplicationSpan = Trace.start("updateReplicationTable");
@@ -123,8 +123,7 @@ public void run() {
       updateReplicationSpan.stop();
     }
 
-    log.info("Closed {} WAL replication references in replication table in {}", recordsClosed,
-        sw.toString());
+    log.info("Closed {} WAL replication references in replication table in {}", recordsClosed, sw);
   }
 
   /**
@@ -142,7 +141,7 @@ public void run() {
       for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
         if (entry.getValue() == WalState.UNREFERENCED || entry.getValue() == WalState.CLOSED) {
           Path path = entry.getKey();
-          log.debug("Found closed WAL " + path.toString());
+          log.debug("Found closed WAL " + path);
           result.add(path.toString());
         }
       }
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
index a2ca9b414e..19259b2e2e 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectWriteAheadLogsTest.java
@@ -242,7 +242,7 @@ public void replicationDelaysFileCollection() throws Exception {
     Connector conn = EasyMock.createMock(Connector.class);
     Scanner mscanner = EasyMock.createMock(Scanner.class);
     Scanner rscanner = EasyMock.createMock(Scanner.class);
-    String row = MetadataSchema.ReplicationSection.getRowPrefix() + path.toString();
+    String row = MetadataSchema.ReplicationSection.getRowPrefix() + path;
     String colf = MetadataSchema.ReplicationSection.COLF.toString();
     String colq = "1";
     Map<Key,Value> replicationWork = Collections.singletonMap(new Key(row, colf, colq),
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index 952226f51b..03dec3789b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -897,7 +897,7 @@ private void flushChanges(SortedMap<TServerInstance,TabletServerStatus> currentT
             // Don't let the log message get too gigantic
             if (builder.length() > ASSINGMENT_BUFFER_MAX_LENGTH) {
               builder.append("]");
-              Master.log.debug("{} assigning tablets: [{}", store.name(), builder.toString());
+              Master.log.debug("{} assigning tablets: [{}", store.name(), builder);
               builder.setLength(0);
             }
 
@@ -913,7 +913,7 @@ private void flushChanges(SortedMap<TServerInstance,TabletServerStatus> currentT
       if (builder.length() > 0) {
         // Make sure to log any leftover assignments
         builder.append("]");
-        Master.log.debug("{} assigning tablets: [{}", store.name(), builder.toString());
+        Master.log.debug("{} assigning tablets: [{}", store.name(), builder);
       }
 
       if (!unassigned.isEmpty() && assignedOut.isEmpty())
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
index 067136f537..8c9a126eae 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/RemoveCompleteReplicationRecords.java
@@ -205,7 +205,7 @@ protected long removeRowIfNecessary(BatchWriter bw, SortedMap<Key,Value> columns
     mutations.add(m);
     for (Entry<Table.ID,Long> entry : tableToTimeCreated.entrySet()) {
       log.info("Removing order mutation for table {} at {} for {}", entry.getKey(),
-          entry.getValue(), row.toString());
+          entry.getValue(), row);
       Mutation orderMutation = OrderSection.createMutation(row.toString(), entry.getValue());
       orderMutation.putDelete(OrderSection.NAME, new Text(entry.getKey().getUtf8()));
       mutations.add(orderMutation);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
index 9611ce8dc3..1873ee79b0 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/StatusMaker.java
@@ -289,7 +289,7 @@ private long setAndGetCreatedTime(Path file, String tableId)
     }
 
     Status status = Status.newBuilder().setCreatedTime(createdTime).build();
-    Mutation m = new Mutation(new Text(ReplicationSection.getRowPrefix() + file.toString()));
+    Mutation m = new Mutation(new Text(ReplicationSection.getRowPrefix() + file));
     m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId), ProtobufUtil.toValue(status));
     replicationWriter.addMutation(m);
     replicationWriter.flush();
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
index 084e8f364c..de6d18ffb0 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkMaker.java
@@ -109,7 +109,7 @@ public void run() {
         // Don't create the record if we have nothing to do.
         // TODO put this into a filter on serverside
         if (!shouldCreateWork(status)) {
-          log.debug("Not creating work: {}", status.toString());
+          log.debug("Not creating work: {}", status);
           continue;
         }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
index b087e00523..84f54cdc6f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
@@ -67,7 +67,7 @@
 
       for (FileStatus fileStatus : files) {
         String fileName = fileStatus.getPath().getName();
-        log.info("filename " + fileStatus.getPath().toString());
+        log.info("filename " + fileStatus.getPath());
         String sa[] = fileName.split("\\.");
         String extension = "";
         if (sa.length > 1) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
index e09c236418..368816f3fc 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
@@ -246,7 +246,7 @@ private String prepareBulkImport(Master master, final VolumeManager fs, String d
             fs.rename(fileStatus.getPath(), newPath);
             log.debug("Moved {} to {}", fileStatus.getPath(), newPath);
           } catch (IOException E1) {
-            log.error("Could not move: {} {}", fileStatus.getPath().toString(), E1.getMessage());
+            log.error("Could not move: {} {}", fileStatus.getPath(), E1.getMessage());
           }
 
         } catch (Exception ex) {
diff --git a/server/master/src/test/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssignerHelperTest.java b/server/master/src/test/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssignerHelperTest.java
index d45cc506cb..aa30f181d4 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssignerHelperTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/replication/DistributedWorkQueueWorkAssignerHelperTest.java
@@ -34,7 +34,7 @@
 
   @Test
   public void createsValidZKNodeName() {
-    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID());
     ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
 
     String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey(p.toString(), target);
@@ -44,7 +44,7 @@ public void createsValidZKNodeName() {
 
   @Test
   public void queueKeySerialization() {
-    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID());
     ReplicationTarget target = new ReplicationTarget("cluster1", "table1", Table.ID.of("1"));
 
     String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey(p.toString(), target);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index c5e02afc5b..d4e39b8929 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -2669,8 +2669,7 @@ private HostAndPort startReplicationService() throws UnknownHostException {
       // Advertise the host and port for replication service given the host and port for the
       // tserver.
       ZooReaderWriter.getInstance().putPersistentData(
-          ZooUtil.getRoot(getInstance()) + ReplicationConstants.ZOO_TSERVERS + "/"
-              + clientAddress.toString(),
+          ZooUtil.getRoot(getInstance()) + ReplicationConstants.ZOO_TSERVERS + "/" + clientAddress,
           sp.address.toString().getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
     } catch (Exception e) {
       log.error("Could not advertise replication service port", e);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
index 62d2a23f50..7beb5de0bf 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/compaction/CompactionPlan.java
@@ -38,7 +38,7 @@
   @Override
   public String toString() {
     StringBuilder b = new StringBuilder();
-    b.append(inputFiles.toString());
+    b.append(inputFiles);
     if (!deleteFiles.isEmpty()) {
       b.append(" files to be deleted ");
       b.append(deleteFiles);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
index 48576e5baa..4ae8362eb1 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/constraints/ConstraintChecker.java
@@ -77,7 +77,7 @@ public ConstraintChecker(TableConfiguration conf) {
       loader = null;
       constrains.add(new UnsatisfiableConstraint((short) -1,
           "Failed to load constraints, not accepting mutations."));
-      log.error("Failed to load constraints " + conf.getTableId() + " " + e.toString(), e);
+      log.error("Failed to load constraints " + conf.getTableId() + " " + e, e);
     }
   }
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index 4ad001a77a..9e7a7531e8 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -532,7 +532,7 @@ public synchronized void open(String address) throws IOException {
     }
 
     syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
-    syncThread.setName("Accumulo WALog thread " + toString());
+    syncThread.setName("Accumulo WALog thread " + this);
     syncThread.start();
     op.await();
     log.debug("Got new write-ahead log: {}", this);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
index 5c44cfddc2..f011e31ab7 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/scan/LookupTask.java
@@ -102,7 +102,7 @@ public void run() {
           continue;
         }
         Thread.currentThread().setName("Client: " + session.client + " User: " + session.getUser()
-            + " Start: " + session.startTime + " Tablet: " + entry.getKey().toString());
+            + " Start: " + session.startTime + " Tablet: " + entry.getKey());
 
         LookupResult lookupResult;
         try {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
index b2d847bbf6..9f8de0c8da 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
@@ -54,7 +54,7 @@ public void run() {
       }
     } catch (KeeperException | InterruptedException e) {
       // we'll just clean it up again later
-      log.debug("Error reading bulk import live transactions {}", e.toString());
+      log.debug("Error reading bulk import live transactions {}", e);
     }
   }
 
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
index 548f046fca..692c76e684 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Compactor.java
@@ -193,7 +193,7 @@ public CompactionStats call() throws IOException, CompactionCanceledException {
     final Path outputFilePath = outputFile.path();
     final String outputFilePathName = outputFilePath.toString();
     String oldThreadName = Thread.currentThread().getName();
-    String newThreadName = "MajC compacting " + extent.toString() + " started "
+    String newThreadName = "MajC compacting " + extent + " started "
         + dateFormatter.format(new Date()) + " file: " + outputFile;
     Thread.currentThread().setName(newThreadName);
     thread = Thread.currentThread();
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
index 99a84a986e..32fb00901a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/RootFiles.java
@@ -106,7 +106,7 @@ public static void replaceFiles(AccumuloConfiguration acuTableConf, VolumeManage
         if (fs.exists(new Path(expectedCompactedFile))) {
           // compaction finished, but did not finish deleting compacted files.. so delete it
           if (!fs.deleteRecursively(file.getPath()))
-            log.warn("Delete of file: {} return false", file.getPath().toString());
+            log.warn("Delete of file: {} return false", file.getPath());
           continue;
         }
         // compaction did not finish, so put files back
@@ -122,7 +122,7 @@ public static void replaceFiles(AccumuloConfiguration acuTableConf, VolumeManage
         if (deleteTmp) {
           log.warn("cleaning up old tmp file: {}", path);
           if (!fs.deleteRecursively(file.getPath()))
-            log.warn("Delete of tmp file: {} return false", file.getPath().toString());
+            log.warn("Delete of tmp file: {} return false", file.getPath());
 
         }
         continue;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/ScanDataSource.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/ScanDataSource.java
index 3c2f19e10e..0ddd362546 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/ScanDataSource.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/ScanDataSource.java
@@ -155,7 +155,7 @@ public boolean isCurrent() {
 
       if (interruptFlag.get())
         throw new IterationInterruptedException(
-            tablet.getExtent().toString() + " " + interruptFlag.hashCode());
+            tablet.getExtent() + " " + interruptFlag.hashCode());
 
       // only acquire the file manager when we know the tablet is open
       if (fileManager == null) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index a30a3b992d..9ad246cc3a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -269,8 +269,8 @@ public int getLogId() {
   FileRef getNextMapFilename(String prefix) throws IOException {
     String extension = FileOperations.getNewFileExtension(tableConfiguration);
     checkTabletDir();
-    return new FileRef(location.toString() + "/" + prefix
-        + UniqueNameAllocator.getInstance().getNextName() + "." + extension);
+    return new FileRef(location + "/" + prefix + UniqueNameAllocator.getInstance().getNextName()
+        + "." + extension);
   }
 
   private void checkTabletDir() throws IOException {
@@ -381,7 +381,7 @@ public void propertiesChanged() {
         try {
           setupDefaultSecurityLabels(extent);
         } catch (Exception e) {
-          log.error("Failed to reload default security labels for extent: {}", extent.toString());
+          log.error("Failed to reload default security labels for extent: {}", extent);
         }
       }
 
@@ -391,10 +391,10 @@ public void propertyChanged(String prop) {
           reloadConstraints();
         else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
           try {
-            log.info("Default security labels changed for extent: {}", extent.toString());
+            log.info("Default security labels changed for extent: {}", extent);
             setupDefaultSecurityLabels(extent);
           } catch (Exception e) {
-            log.error("Failed to reload default security labels for extent: {}", extent.toString());
+            log.error("Failed to reload default security labels for extent: {}", extent);
           }
         }
 
@@ -1066,7 +1066,7 @@ private MinorCompactionTask createMinorCompactionTask(long flushId,
 
           logMessage = new StringBuilder();
 
-          logMessage.append(extent.toString());
+          logMessage.append(extent);
           logMessage.append(" closeState " + closeState);
           logMessage.append(" majorCompactionState " + majorCompactionState);
           if (getTabletMemory() != null)
@@ -1943,7 +1943,7 @@ private CompactionStats _majorCompact(MajorCompactionReason reason)
 
         FileRef fileName = getNextMapFilename(
             (filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
-        FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");
+        FileRef compactTmpName = new FileRef(fileName.path() + "_tmp");
 
         AccumuloConfiguration tableConf = createTableConfiguration(tableConfiguration, plan);
 
@@ -2469,7 +2469,7 @@ public synchronized void removeInUseLogs(Set<DfsLogger> candidates) {
 
     // do debug logging outside tablet lock
     for (String logger : otherLogsCopy) {
-      log.debug("Logs for memory compacted: {} {}", getExtent(), logger.toString());
+      log.debug("Logs for memory compacted: {} {}", getExtent(), logger);
     }
 
     for (String logger : currentLogsCopy) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
index a235bd77d0..ed7f81c47f 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
@@ -145,7 +145,7 @@ public TabletData(VolumeManager fs, ZooReader rdr, AccumuloConfiguration conf)
     for (String good : goodPaths) {
       Path path = new Path(good);
       String filename = path.getName();
-      FileRef ref = new FileRef(location.toString() + "/" + filename, path);
+      FileRef ref = new FileRef(location + "/" + filename, path);
       DataFileValue dfv = new DataFileValue(0, 0);
       dataFiles.put(ref, dfv);
 
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystemTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystemTest.java
index 25cf68bee3..19bb2a0451 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystemTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystemTest.java
@@ -461,7 +461,7 @@ public void dontSendEmptyDataToPeer() throws Exception {
 
     ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
     DataInputStream input = null;
-    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID());
     Status status = null;
     long sizeLimit = Long.MAX_VALUE;
     String remoteTableId = target.getRemoteIdentifier();
@@ -491,7 +491,7 @@ public void consumedButNotSentDataShouldBeRecorded() throws Exception {
 
     ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
     DataInputStream input = null;
-    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
+    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID());
     Status status = null;
     long sizeLimit = Long.MAX_VALUE;
     String remoteTableId = target.getRemoteIdentifier();
diff --git a/shell/src/main/java/org/apache/accumulo/shell/Shell.java b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
index 2707fef8e4..11042b6d82 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/Shell.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/Shell.java
@@ -279,7 +279,7 @@ public boolean config(String... args) throws IOException {
     }
 
     if (options.getUnrecognizedOptions() != null) {
-      logError("Unrecognized Options: " + options.getUnrecognizedOptions().toString());
+      logError("Unrecognized Options: " + options.getUnrecognizedOptions());
       jc.usage();
       exitCode = 1;
       return false;
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/AddAuthsCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/AddAuthsCommand.java
index 6af5a7b927..173a4d17d9 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/AddAuthsCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/AddAuthsCommand.java
@@ -44,7 +44,7 @@ public int execute(final String fullCommand, final CommandLine cl, final Shell s
         .getUserAuthorizations(user);
     StringBuilder userAuths = new StringBuilder();
     if (!auths.isEmpty()) {
-      userAuths.append(auths.toString());
+      userAuths.append(auths);
       userAuths.append(",");
     }
     userAuths.append(scanOpts);
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
index 987b51f78a..aa6a84f4c3 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/InsertCommand.java
@@ -70,7 +70,7 @@ public int execute(final String fullCommand, final CommandLine cl, final Shell s
 
     if (cl.hasOption(insertOptAuths.getOpt())) {
       final ColumnVisibility le = new ColumnVisibility(cl.getOptionValue(insertOptAuths.getOpt()));
-      Shell.log.debug("Authorization label will be set to: " + le.toString());
+      Shell.log.debug("Authorization label will be set to: " + le);
 
       if (cl.hasOption(timestampOpt.getOpt()))
         m.put(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val);
@@ -120,7 +120,7 @@ public int execute(final String fullCommand, final CommandLine cl, final Shell s
         lines.add("\tConstraint Failures:");
       }
       for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries()) {
-        lines.add("\t\t" + cvs.toString());
+        lines.add("\t\t" + cvs);
       }
 
       if (lines.size() == 0 || e.getUnknownExceptions() > 0) {
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
index 7df70f024a..68fd757751 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/AccumuloClassLoader.java
@@ -248,7 +248,7 @@ public static synchronized ClassLoader getClassLoader() throws IOException {
 
       ClassLoader parentClassLoader = AccumuloClassLoader.class.getClassLoader();
 
-      log.debug("Create 2nd tier ClassLoader using URLs: {}", urls.toString());
+      log.debug("Create 2nd tier ClassLoader using URLs: {}", urls);
       classloader = new URLClassLoader(urls.toArray(new URL[urls.size()]), parentClassLoader) {
         @Override
         protected synchronized Class<?> loadClass(String name, boolean resolve)
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
index bdaa181f46..f8f9e40fde 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
@@ -182,21 +182,21 @@ public void close() {
   @Override
   public void fileCreated(FileChangeEvent event) throws Exception {
     if (log.isDebugEnabled())
-      log.debug("{} created, recreating classloader", event.getFile().getURL().toString());
+      log.debug("{} created, recreating classloader", event.getFile().getURL());
     scheduleRefresh();
   }
 
   @Override
   public void fileDeleted(FileChangeEvent event) throws Exception {
     if (log.isDebugEnabled())
-      log.debug("{} deleted, recreating classloader", event.getFile().getURL().toString());
+      log.debug("{} deleted, recreating classloader", event.getFile().getURL());
     scheduleRefresh();
   }
 
   @Override
   public void fileChanged(FileChangeEvent event) throws Exception {
     if (log.isDebugEnabled())
-      log.debug("{} changed, recreating classloader", event.getFile().getURL().toString());
+      log.debug("{} changed, recreating classloader", event.getFile().getURL());
     scheduleRefresh();
   }
 
@@ -206,7 +206,7 @@ public String toString() {
 
     for (FileObject f : files) {
       try {
-        buf.append("\t").append(f.getURL().toString()).append("\n");
+        buf.append("\t").append(f.getURL()).append("\n");
       } catch (FileSystemException e) {
         log.error("Error getting URL for file", e);
       }
diff --git a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
index adf7213b30..e23130a0c2 100644
--- a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
+++ b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoaderTest.java
@@ -48,7 +48,7 @@ public void setup() throws Exception {
     vfs = ContextManagerTest.getVFS();
 
     folder1.create();
-    folderPath = folder1.getRoot().toURI().toString() + ".*";
+    folderPath = folder1.getRoot().toURI() + ".*";
 
     FileUtils.copyURLToFile(this.getClass().getResource("/HelloWorld.jar"),
         folder1.newFile("HelloWorld.jar"));
diff --git a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
index 792df3fd9a..44db3389f2 100644
--- a/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
+++ b/start/src/test/java/org/apache/accumulo/start/classloader/vfs/ContextManagerTest.java
@@ -64,7 +64,7 @@ public void setup() throws Exception {
         folder2.newFile("HelloWorld.jar"));
 
     uri1 = new File(folder1.getRoot(), "HelloWorld.jar").toURI().toString();
-    uri2 = folder2.getRoot().toURI().toString() + ".*";
+    uri2 = folder2.getRoot().toURI() + ".*";
 
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
index e5775c3f4e..7a9f08c53c 100644
--- a/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
@@ -263,18 +263,18 @@ public void testUserOperationsAudits() throws AccumuloSecurityException, Accumul
     assertEquals(2,
         findAuditMessage(auditMessages, "action: createUser; targetUser: " + AUDIT_USER_2));
     assertEquals(1, findAuditMessage(auditMessages, "action: grantSystemPermission; permission: "
-        + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2));
+        + SystemPermission.ALTER_TABLE + "; targetUser: " + AUDIT_USER_2));
     assertEquals(1, findAuditMessage(auditMessages, "action: revokeSystemPermission; permission: "
-        + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2));
+        + SystemPermission.ALTER_TABLE + "; targetUser: " + AUDIT_USER_2));
     assertEquals(1, findAuditMessage(auditMessages, "action: grantTablePermission; permission: "
-        + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME));
+        + TablePermission.READ + "; targetTable: " + NEW_TEST_TABLE_NAME));
     assertEquals(1, findAuditMessage(auditMessages, "action: revokeTablePermission; permission: "
-        + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME));
+        + TablePermission.READ + "; targetTable: " + NEW_TEST_TABLE_NAME));
     // changePassword is allowed and succeeded
     assertEquals(2, findAuditMessage(auditMessages,
         "action: changePassword; targetUser: " + AUDIT_USER_2 + ""));
     assertEquals(1, findAuditMessage(auditMessages, "action: changeAuthorizations; targetUser: "
-        + AUDIT_USER_2 + "; authorizations: " + auths.toString()));
+        + AUDIT_USER_2 + "; authorizations: " + auths));
 
     // allowed to dropUser and succeeded
     assertEquals(2,
@@ -305,14 +305,14 @@ public void testImportExportOperationsAudits()
     bw.close();
 
     // Prepare to export the table
-    File exportDir = new File(getCluster().getConfig().getDir().toString() + "/export");
+    File exportDir = new File(getCluster().getConfig().getDir() + "/export");
 
     auditConnector.tableOperations().offline(OLD_TEST_TABLE_NAME);
     auditConnector.tableOperations().exportTable(OLD_TEST_TABLE_NAME, exportDir.toString());
 
     // We've exported the table metadata to the MiniAccumuloCluster root dir. Grab the .rf file path
     // to re-import it
-    File distCpTxt = new File(exportDir.toString() + "/distcp.txt");
+    File distCpTxt = new File(exportDir + "/distcp.txt");
     File importFile = null;
 
     // Just grab the first rf file, it will do for now.
@@ -354,14 +354,13 @@ public void testImportExportOperationsAudits()
     assertEquals(1,
         findAuditMessage(auditMessages,
             String.format(AuditedSecurityOperation.CAN_IMPORT_AUDIT_TEMPLATE, NEW_TEST_TABLE_NAME,
-                filePrefix + exportDir.toString())));
+                filePrefix + exportDir)));
     assertEquals(1, findAuditMessage(auditMessages, String
         .format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, THIRD_TEST_TABLE_NAME)));
     assertEquals(1,
         findAuditMessage(auditMessages,
             String.format(AuditedSecurityOperation.CAN_BULK_IMPORT_AUDIT_TEMPLATE,
-                THIRD_TEST_TABLE_NAME, filePrefix + exportDir.toString(),
-                filePrefix + failDir.toString())));
+                THIRD_TEST_TABLE_NAME, filePrefix + exportDir, filePrefix + failDir)));
     assertEquals(1,
         findAuditMessage(auditMessages,
             String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE,
diff --git a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
index e0e32bb622..3a9ecfdc97 100644
--- a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
@@ -173,7 +173,7 @@ public void test() throws Exception {
           continue;
         }
         Assert.fail("Delete entry should have been deleted by the garbage collector: "
-            + entry.getKey().getRow().toString());
+            + entry.getKey().getRow());
       }
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
index f77b0ca477..7b78bc9f70 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
@@ -96,7 +96,7 @@ public void test() throws Exception {
         fs.mkdirs(files);
         for (int i1 = 0; i1 < 10; i1++) {
           FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-              .forFile(files.toString() + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf())
+              .forFile(files + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf())
               .withTableConfiguration(DefaultConfiguration.getInstance()).build();
           writer.startDefaultLocalityGroup();
           for (int j = 0x100; j < 0xfff; j += 3) {
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportSequentialRowsIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportSequentialRowsIT.java
index 7a58e98882..1e65184361 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportSequentialRowsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportSequentialRowsIT.java
@@ -82,7 +82,7 @@ public void testBulkImportFailure() throws Exception {
 
     Path rfile = new Path(bulk, "file.rf");
 
-    log.info("Generating RFile {}", rfile.toUri().toString());
+    log.info("Generating RFile {}", rfile.toUri());
 
     GenerateSequentialRFile.main(new String[] {"-f", rfile.toUri().toString(), "-nr",
         Long.toString(NR), "-nv", Long.toString(NV)});
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
index e892f6a07a..9f8d7454ff 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
@@ -55,7 +55,7 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
     v2 = new Path("file://" + v2f.getAbsolutePath());
 
     // Run MAC on two locations in the local file system
-    cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
+    cfg.setProperty(Property.INSTANCE_VOLUMES, v1 + "," + v2);
 
     // use raw local file system so walogs sync and flush will work
     hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
diff --git a/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java b/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
index d6cf212b59..0be588f296 100644
--- a/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/GetFileInfoBulkIT.java
@@ -121,7 +121,7 @@ public void test() throws Exception {
         fs.mkdirs(files);
         for (int i1 = 0; i1 < 100; i1++) {
           FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-              .forFile(files.toString() + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf())
+              .forFile(files + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf())
               .withTableConfiguration(DefaultConfiguration.getInstance()).build();
           writer.startDefaultLocalityGroup();
           for (int j = 0x100; j < 0xfff; j += 3) {
diff --git a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
index c4c76d34f0..0444730f93 100644
--- a/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/InMemoryMapIT.java
@@ -335,7 +335,7 @@ private String dumpInMemoryMap(InMemoryMap map, List<MemKey> memkeys) {
 
     for (MemKey mk : memkeys) {
       sb.append("  ");
-      sb.append(mk.toString());
+      sb.append(mk);
       sb.append("\n");
     }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
index 69116b3338..9668743709 100644
--- a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
@@ -133,7 +133,7 @@ public void test() throws Exception {
       Configuration conf = new Configuration(false);
       conf.addResource(
           new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
-      conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
+      conf.set(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2);
       BufferedOutputStream fos = new BufferedOutputStream(
           new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
       conf.writeXml(fos);
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
index 26c56d6e3f..bf0b569264 100644
--- a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -347,11 +347,11 @@ public void exporttableImporttable() throws Exception {
     ts.exec("config -t " + table + " -s table.split.threshold=345M", true);
     ts.exec("offline " + table, true);
     File exportDir = new File(rootPath, "ShellServerIT.export");
-    String exportUri = "file://" + exportDir.toString();
-    String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp").toString();
+    String exportUri = "file://" + exportDir;
+    String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp");
     ts.exec("exporttable -t " + table + " " + exportUri, true);
     DistCp cp = newDistCp(new Configuration(false));
-    String import_ = "file://" + new File(rootPath, "ShellServerIT.import").toString();
+    String import_ = "file://" + new File(rootPath, "ShellServerIT.import");
     if (getCluster().getConnectionInfo().saslEnabled()) {
       // DistCp bugs out trying to get a fs delegation token to perform the cp. Just copy it
       // ourselves by hand.
@@ -718,7 +718,7 @@ public String getErrorMessage() {
                 try {
                   Connector c = getConnector();
                   return "Current auths for root are: "
-                      + c.securityOperations().getUserAuthorizations("root").toString();
+                      + c.securityOperations().getUserAuthorizations("root");
                 } catch (Exception e) {
                   return "Could not check authorizations";
                 }
@@ -1619,7 +1619,7 @@ public void testPertableClasspath() throws Exception {
     fooConstraintJar.deleteOnExit();
 
     ts.exec("config -s " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1="
-        + fooFilterJar.toURI().toString() + "," + fooConstraintJar.toURI().toString(), true);
+        + fooFilterJar.toURI() + "," + fooConstraintJar.toURI(), true);
 
     ts.exec("createtable " + table, true);
     ts.exec("config -t " + table + " -s " + Property.TABLE_CLASSPATH.getKey() + "=cx1", true);
diff --git a/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java b/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
index a6266cead2..fd42199170 100644
--- a/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
@@ -184,7 +184,7 @@ public void createTable() throws TableExistsException, AccumuloException,
         .getProperties(tableName);
     Map<String,String> props = propsToMap(itrProps);
     assertEquals(DefaultKeySizeConstraint.class.getName(),
-        props.get(Property.TABLE_CONSTRAINT_PREFIX.toString() + "1"));
+        props.get(Property.TABLE_CONSTRAINT_PREFIX + "1"));
     connector.tableOperations().delete(tableName);
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
index 8f22c9f5c5..3b78473f7f 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
@@ -122,7 +122,7 @@ public static void runTest(Connector connector, Opts opts, BatchWriterOpts bwOpt
 
         for (Entry<Key,Value> entry : s) {
           throw new Exception("ERROR : saw entries in range that should be deleted ( first value : "
-              + entry.getValue().toString() + ")");
+              + entry.getValue() + ")");
         }
       }
     } else if (opts.mode.equals("verify")) {
@@ -225,7 +225,7 @@ private static void checkKeyValue(long expected, Key k, Value v) throws Exceptio
     }
 
     if (!v.toString().equals("" + expected)) {
-      throw new Exception("ERROR : expected value " + expected + " saw " + v.toString());
+      throw new Exception("ERROR : expected value " + expected + " saw " + v);
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
index 44e173dd67..a0d00d180e 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestRandomDeletes.java
@@ -75,7 +75,7 @@ public int compareTo(RowColumn other) {
 
     @Override
     public String toString() {
-      return row.toString() + ":" + column.toString();
+      return row + ":" + column;
     }
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeChooserFailureIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeChooserFailureIT.java
index 7ec892bc8e..3de8ebaef1 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeChooserFailureIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeChooserFailureIT.java
@@ -90,8 +90,7 @@ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite)
 
     // Only add volumes 1, 2, and 4 to the list of instance volumes to have one volume that isn't in
     // the options list when they are choosing
-    cfg.setProperty(Property.INSTANCE_VOLUMES,
-        v1.toString() + "," + v2.toString() + "," + v4.toString());
+    cfg.setProperty(Property.INSTANCE_VOLUMES, v1 + "," + v2 + "," + v4);
     // no not set preferred volumes
 
     // use raw local file system so walogs sync and flush will work
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
index fb85930f62..da9d9749d4 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
@@ -104,7 +104,7 @@ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite)
     v3 = new Path("file://" + v3f.getAbsolutePath());
     v4 = new Path("file://" + v4f.getAbsolutePath());
 
-    systemPreferredVolumes = v1.toString() + "," + v2.toString();
+    systemPreferredVolumes = v1 + "," + v2;
     // exclude v4
     siteConfig.put(PreferredVolumeChooser.TABLE_PREFERRED_VOLUMES, systemPreferredVolumes);
     cfg.setSiteConfig(siteConfig);
@@ -117,8 +117,7 @@ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite)
 
     // Only add volumes 1, 2, and 4 to the list of instance volumes to have one volume that isn't in
     // the options list when they are choosing
-    cfg.setProperty(Property.INSTANCE_VOLUMES,
-        v1.toString() + "," + v2.toString() + "," + v4.toString());
+    cfg.setProperty(Property.INSTANCE_VOLUMES, v1 + "," + v2 + "," + v4);
 
     // use raw local file system so walogs sync and flush will work
     hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
@@ -181,13 +180,15 @@ public static void verifyVolumes(Connector connector, String tableName, Range ta
             inVolume = true;
           }
         }
-        assertTrue("Data not written to the correct volumes.  "
-            + entry.getKey().getColumnQualifier().toString(), inVolume);
+        assertTrue(
+            "Data not written to the correct volumes.  " + entry.getKey().getColumnQualifier(),
+            inVolume);
         fileCount++;
       }
     }
-    assertEquals("Did not see all the volumes. volumes: " + volumes.toString() + " volumes seen: "
-        + volumesSeen.toString(), volumes.size(), volumesSeen.size());
+    assertEquals(
+        "Did not see all the volumes. volumes: " + volumes + " volumes seen: " + volumesSeen,
+        volumes.size(), volumesSeen.size());
     assertEquals("Wrong number of files", 26, fileCount);
   }
 
@@ -197,7 +198,7 @@ public static void verifyNoVolumes(Connector connector, String tableName, Range
       scanner.setRange(tableRange);
       scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       for (Entry<Key,Value> entry : scanner) {
-        fail("Data incorrectly written to " + entry.getKey().getColumnQualifier().toString());
+        fail("Data incorrectly written to " + entry.getKey().getColumnQualifier());
       }
     }
   }
@@ -245,8 +246,9 @@ public static void verifyWaLogVolumes(Connector connector, Range tableRange, Str
             volumesSeen.add(volume);
           inVolume = true;
         }
-        assertTrue("Data not written to the correct volumes.  "
-            + entry.getKey().getColumnQualifier().toString(), inVolume);
+        assertTrue(
+            "Data not written to the correct volumes.  " + entry.getKey().getColumnQualifier(),
+            inVolume);
       }
     }
   }
@@ -307,8 +309,7 @@ public void twoTablesRandomVolumeChooser() throws Exception {
     writeAndReadData(connector, tableName);
     // Verify the new files are written to the Volumes specified
 
-    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID),
-        v1.toString() + "," + v2.toString() + "," + v4.toString());
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1 + "," + v2 + "," + v4);
 
     connector.namespaceOperations().create(namespace2);
 
@@ -327,7 +328,7 @@ public void twoTablesRandomVolumeChooser() throws Exception {
     writeAndReadData(connector, tableName2);
     // Verify the new files are written to the Volumes specified
     verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2),
-        v1.toString() + "," + v2.toString() + "," + v4.toString());
+        v1 + "," + v2 + "," + v4);
   }
 
   // Test that uses two tables with 10 split points each. The first uses the RandomVolumeChooser and
@@ -346,8 +347,7 @@ public void twoTablesDiffChoosers() throws Exception {
         PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
 
     // Create table1 on namespace1
-    verifyVolumesForWritesToNewTable(connector, namespace1,
-        v1.toString() + "," + v2.toString() + "," + v4.toString());
+    verifyVolumesForWritesToNewTable(connector, namespace1, v1 + "," + v2 + "," + v4);
     connector.namespaceOperations().create(namespace2);
 
     connector.namespaceOperations().setProperty(namespace2,
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 645661c7df..0433aeeec5 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -108,7 +108,7 @@ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite)
     URI v1Uri = v1.toUri();
     cfg.setProperty(Property.INSTANCE_DFS_DIR, v1Uri.getPath());
     cfg.setProperty(Property.INSTANCE_DFS_URI, v1Uri.getScheme() + v1Uri.getHost());
-    cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
+    cfg.setProperty(Property.INSTANCE_VOLUMES, v1 + "," + v2);
     cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
 
     // use raw local file system so walogs sync and flush will work
@@ -267,7 +267,7 @@ public void testRelativePaths() throws Exception {
       for (Entry<Key,Value> entry : metaScanner) {
         String cq = entry.getKey().getColumnQualifier().toString();
         Path path = new Path(cq);
-        Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
+        Assert.assertTrue("relative path not deleted " + path, path.depth() > 2);
       }
     }
   }
@@ -293,8 +293,7 @@ public void testAddVolumes() throws Exception {
     assertTrue(v3f.mkdir() || v3f.isDirectory());
     Path v3 = new Path("file://" + v3f.getAbsolutePath());
 
-    conf.set(Property.INSTANCE_VOLUMES.getKey(),
-        v1.toString() + "," + v2.toString() + "," + v3.toString());
+    conf.set(Property.INSTANCE_VOLUMES.getKey(), v1 + "," + v2 + "," + v3);
     BufferedOutputStream fos = new BufferedOutputStream(
         new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
     conf.writeXml(fos);
@@ -339,7 +338,7 @@ public void testNonConfiguredVolumes() throws Exception {
     assertTrue(v3f.mkdir() || v3f.isDirectory());
     Path v3 = new Path("file://" + v3f.getAbsolutePath());
 
-    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString() + "," + v3.toString());
+    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2 + "," + v3);
     BufferedOutputStream fos = new BufferedOutputStream(
         new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
     conf.writeXml(fos);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
index bfb416f216..771ab8e90f 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
@@ -246,7 +246,7 @@ public void run() {
             break;
           }
         }
-        Assert.assertTrue("Mutation not found: " + m.toString(), found);
+        Assert.assertTrue("Mutation not found: " + m, found);
       }
 
       for (int m = 0; m < NUM_THREADS; m++) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
index 83e5a3561e..38e859e699 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
@@ -102,7 +102,7 @@ public void test() throws Exception {
     Path tmp = new Path(tabletDir, "junk.rf_tmp");
     // Make the file
     fs.create(tmp).close();
-    log.info("Created tmp file {}", tmp.toString());
+    log.info("Created tmp file {}", tmp);
     getCluster().stop();
     getCluster().start();
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
index 38ca23040c..f5f6f43d96 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -131,8 +131,8 @@ private void checkData(String table2, Connector c) throws TableNotFoundException
       HashMap<String,String> actual = new HashMap<>();
 
       for (Entry<Key,Value> entry : scanner)
-        actual.put(entry.getKey().getRowData().toString() + ":"
-            + entry.getKey().getColumnQualifierData().toString(), entry.getValue().toString());
+        actual.put(entry.getKey().getRowData() + ":" + entry.getKey().getColumnQualifierData(),
+            entry.getValue().toString());
 
       Assert.assertEquals(expected, actual);
     }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
index 547b4b3a56..ad952a600e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
@@ -138,7 +138,7 @@ private void testSplit(String table, String start, String end, String result, in
     StringBuilder sb = new StringBuilder();
     // See that whole tablets are removed
     for (Text split : remainingSplits)
-      sb.append(split.toString());
+      sb.append(split);
     assertEquals(result, sb.toString());
     // See that the rows are really deleted
     try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
index 6f07cf7224..5309dd8def 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@ -90,7 +90,7 @@ public void run() throws Exception {
         public void run() {
           try {
             // split the table
-            final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
+            final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end + "\0"));
             conn.tableOperations().addSplits(tableName, afterEnd);
           } catch (Exception ex) {
             log.error("Exception", ex);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
index 4dfbc19a06..7ec2e2e861 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
@@ -151,7 +151,7 @@ public void testScanId() throws Exception {
 
     List<String> tservers = conn.instanceOperations().getTabletServers();
 
-    log.debug("tablet servers {}", tservers.toString());
+    log.debug("tablet servers {}", tservers);
 
     for (String tserver : tservers) {
 
@@ -248,7 +248,7 @@ public void run() {
 
           Text row = entry.getKey().getRow();
 
-          log.debug("worker {}, row {}", workerIndex, row.toString());
+          log.debug("worker {}, row {}", workerIndex, row);
 
           if (entry.getValue() != null) {
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TableChangeStateIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TableChangeStateIT.java
index 7acb4eede0..5f53fa7561 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/TableChangeStateIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TableChangeStateIT.java
@@ -412,7 +412,7 @@ public void run() {
       List<IteratorSetting> compactIterators = new ArrayList<>();
       compactIterators.add(slow);
 
-      log.trace("Slow iterator {}", slow.toString());
+      log.trace("Slow iterator {}", slow);
 
       try {
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
index a5996c0708..4eaa2d8b1d 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
@@ -243,8 +243,8 @@ private void writeSomeData(Connector conn, String tableName, int row, int col) t
       }
     }
 
-    fail("Unable to get the correct number of WALs, expected " + expectedCount + " but got "
-        + wals.toString());
+    fail(
+        "Unable to get the correct number of WALs, expected " + expectedCount + " but got " + wals);
     return new HashMap<>();
   }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/mrit/IntegrationTestMapReduce.java b/test/src/main/java/org/apache/accumulo/test/mrit/IntegrationTestMapReduce.java
index 7511f87ea7..2cbfdfd055 100644
--- a/test/src/main/java/org/apache/accumulo/test/mrit/IntegrationTestMapReduce.java
+++ b/test/src/main/java/org/apache/accumulo/test/mrit/IntegrationTestMapReduce.java
@@ -164,7 +164,7 @@ protected void reduce(Text code, Iterable<Text> tests,
       StringBuilder result = new StringBuilder("\n");
       for (Text test : tests) {
         result.append("   ");
-        result.append(test.toString());
+        result.append(test);
         result.append("\n");
       }
       context.write(code, new Text(result.toString()));
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java b/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
index 58884225ff..17bd9a210b 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
@@ -91,7 +91,7 @@ public void test() throws Exception {
     fs.mkdirs(files);
     for (int i = 0; i < 100; i++) {
       FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-          .forFile(files.toString() + "/bulk_" + i + "." + RFile.EXTENSION, fs, fs.getConf())
+          .forFile(files + "/bulk_" + i + "." + RFile.EXTENSION, fs, fs.getConf())
           .withTableConfiguration(DefaultConfiguration.getInstance()).build();
       writer.startDefaultLocalityGroup();
       for (int j = 0x100; j < 0xfff; j += 3) {
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
index 3a3eed3793..411b8f1144 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
@@ -477,8 +477,7 @@ public void twoEntriesForTwoTables() throws Exception {
       s.setRange(MetadataSchema.ReplicationSection.getRange());
       for (Entry<Key,Value> metadata : s) {
         records.add(metadata);
-        log.debug("Meta: {} => {}", metadata.getKey().toStringNoTruncate(),
-            metadata.getValue().toString());
+        log.debug("Meta: {} => {}", metadata.getKey().toStringNoTruncate(), metadata.getValue());
       }
 
       Assert.assertEquals("Expected to find 2 records, but actually found " + records, 2,
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerIT.java
index 5f6d2be9a5..85290a01a2 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerIT.java
@@ -225,7 +225,7 @@ public void workNotReAdded() throws Exception {
         + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getRemoteIdentifier()
         + DistributedWorkQueueWorkAssignerHelper.KEY_SEPARATOR + target.getSourceTableId();
 
-    queuedWork.add("wal1|" + serializedTarget.toString());
+    queuedWork.add("wal1|" + serializedTarget);
 
     // Create two mutations, both of which need replication work done
     BatchWriter bw = ReplicationTable.getBatchWriter(conn);
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
index 1146c8ec03..2e51cc6b75 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
@@ -185,8 +185,7 @@ public void test() throws Exception {
       bw.addMutation(m);
 
       // Add a replication entry for our fake WAL
-      m = new Mutation(
-          MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
+      m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri));
       m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId.getUtf8()),
           new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
       bw.addMutation(m);
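
For context, the hunks above are all the same cleanup in three guises: dropping an explicit .toString() where the + operator or StringBuilder.append() already converts its argument, and passing objects directly to SLF4J "{}" placeholders so the conversion is deferred until the log level is enabled. The sketch below is a minimal, self-contained illustration of that pattern, assuming only slf4j-api (plus any binding) on the classpath; ToStringExample and its sample value are hypothetical and are not part of Accumulo or of this patch.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ToStringExample {

  private static final Logger log = LoggerFactory.getLogger(ToStringExample.class);

  public static void main(String[] args) {
    Object extent = "2;row_b;row_a"; // stand-in for any object with a useful toString()

    // Redundant: the + operator already converts the right-hand operand.
    String before = "MajC compacting " + extent.toString();

    // Same result, and null-safe: concatenation renders a null operand as the
    // four characters "null" instead of throwing a NullPointerException.
    String after = "MajC compacting " + extent;

    // StringBuilder.append(Object) performs the same conversion internally.
    StringBuilder logMessage = new StringBuilder();
    logMessage.append(extent);

    // Parameterized logging: hand the object to the placeholder and let the
    // framework call toString() only if the DEBUG level is actually enabled.
    log.debug("Initializing input split: {}", extent);
    log.debug("before equals after: {}, appended: {}", before.equals(after), logMessage);
  }
}

For non-null operands all three forms produce identical output; the only visible difference is that the implicit conversion renders null as "null" rather than throwing.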


 
