Posted to commits@accumulo.apache.org by mm...@apache.org on 2018/12/21 23:44:33 UTC

[accumulo] branch master updated: Remove unused code (#856)

This is an automated email from the ASF dual-hosted git repository.

mmiller pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/master by this push:
     new b66ba5c  Remove unused code (#856)
b66ba5c is described below

commit b66ba5c3f9843f813dfde97664e4ed24445b279e
Author: Mike Miller <mm...@apache.org>
AuthorDate: Fri Dec 21 18:44:29 2018 -0500

    Remove unused code (#856)
    
    * Remove many unused methods
    * Remove unused config from CachableBlockFile
    * Remove unused blockfile constants
    * Deleted unused TableParentConfiguration and MapRLogCloser
    * Remove unused replication code
    * Remove gc dependency no longer used
---
 .../accumulo/core/client/rfile/RFileScanner.java   |   5 +-
 .../accumulo/core/cryptoImpl/AESCryptoService.java |   4 -
 .../apache/accumulo/core/file/FileOperations.java  |  10 --
 .../core/file/blockfile/cache/impl/ClassSize.java  |  60 --------
 .../file/blockfile/cache/impl/SizeConstants.java   |  20 ---
 .../file/blockfile/impl/CachableBlockFile.java     |  26 ++--
 .../apache/accumulo/core/file/rfile/PrintInfo.java |   2 +-
 .../accumulo/core/file/rfile/RFileOperations.java  |   2 +-
 .../accumulo/core/file/rfile/SplitLarge.java       |   2 +-
 .../replication/AccumuloReplicationReplayer.java   |   3 -
 .../core/replication/ReplicationSchema.java        |  17 ---
 .../accumulo/core/summary/SummaryReader.java       |   4 +-
 .../core/file/rfile/MultiLevelIndexTest.java       |   2 +-
 .../core/file/rfile/MultiThreadedRFileTest.java    |   2 +-
 .../apache/accumulo/core/file/rfile/RFileTest.java |   4 +-
 .../mapred/AccumuloOutputFormatImpl.java           |  13 --
 .../mapreduce/AccumuloOutputFormatImpl.java        |  13 --
 .../apache/accumulo/server/ServerConstants.java    |   4 -
 .../org/apache/accumulo/server/ServerContext.java  |  10 --
 .../org/apache/accumulo/server/ServerInfo.java     |   4 -
 .../org/apache/accumulo/server/TabletLevel.java    |  10 --
 .../accumulo/server/client/BulkImporter.java       |   3 -
 .../server/conf/TableParentConfiguration.java      |  48 -------
 .../accumulo/server/conf/ZooConfiguration.java     |   9 --
 .../server/conf/ZooConfigurationFactory.java       |  13 --
 .../server/master/recovery/MapRLogCloser.java      |  48 -------
 .../server/metrics/MetricsConfiguration.java       |  11 --
 .../replication/PrintReplicationRecords.java       |  97 -------------
 .../server/replication/ReplicationUtil.java        |  97 -------------
 .../security/delegation/AuthenticationKey.java     |  14 --
 .../server/util/CheckForMetadataProblems.java      |   3 -
 server/gc/pom.xml                                  |   4 -
 .../replication/CloseWriteAheadLogReferences.java  |  77 -----------
 .../accumulo/master/tableOps/delete/CleanUp.java   |  26 ----
 .../org/apache/accumulo/tserver/TabletServer.java  |  10 +-
 .../tserver/replication/AccumuloReplicaSystem.java | 154 +++++----------------
 .../BatchWriterReplicationReplayer.java            |   6 -
 .../replication/ReplicationServicerHandler.java    |   2 +-
 .../tserver/session/ConditionalSession.java        |   4 +-
 .../accumulo/tserver/tablet/CommitSession.java     |   4 -
 .../vfs/AccumuloReloadingVFSClassLoader.java       |  13 --
 41 files changed, 55 insertions(+), 805 deletions(-)
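
A minimal sketch of the post-change call shape for the CachableBlockFile cleanup noted above, adapted from the RFileScanner and RFile test hunks below; the variables (inputStream, length, hadoopConf, dataCache, indexCache, cryptoService) are assumed to already exist in the caller and are not part of this commit:

    // Sketch only (assumed caller-side variables); see the RFileScanner, PrintInfo, and
    // RFileTest hunks below for the real call sites. The AccumuloConfiguration argument is
    // gone from every Reader constructor; inputStream must be both an InputStream and
    // Seekable (for example an FSDataInputStream, as in the RFileScanner hunk).
    CachableBlockFile.Reader blockReader = new CachableBlockFile.Reader(
        "source-0", inputStream, length, hadoopConf, dataCache, indexCache, cryptoService);
    RFile.Reader rfileReader = new RFile.Reader(blockReader);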

diff --git a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileScanner.java b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileScanner.java
index e7849c7..53f78df 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileScanner.java
@@ -339,9 +339,8 @@ class RFileScanner extends ScannerOptions implements Scanner {
       for (int i = 0; i < sources.length; i++) {
         // TODO may have been a bug with multiple files and caching in older version...
         FSDataInputStream inputStream = (FSDataInputStream) sources[i].getInputStream();
-        readers.add(new RFile.Reader(
-            new CachableBlockFile.Reader("source-" + i, inputStream, sources[i].getLength(),
-                opts.in.getConf(), dataCache, indexCache, tableConf, cryptoService)));
+        readers.add(new RFile.Reader(new CachableBlockFile.Reader("source-" + i, inputStream,
+            sources[i].getLength(), opts.in.getConf(), dataCache, indexCache, cryptoService)));
       }
 
       if (getSamplerConfiguration() != null) {
diff --git a/core/src/main/java/org/apache/accumulo/core/cryptoImpl/AESCryptoService.java b/core/src/main/java/org/apache/accumulo/core/cryptoImpl/AESCryptoService.java
index a2b184d..b191a9f 100644
--- a/core/src/main/java/org/apache/accumulo/core/cryptoImpl/AESCryptoService.java
+++ b/core/src/main/java/org/apache/accumulo/core/cryptoImpl/AESCryptoService.java
@@ -144,10 +144,6 @@ public class AESCryptoService implements CryptoService {
     String kekId;
     byte[] encFek;
 
-    public String getCryptoServiceName() {
-      return cryptoServiceName;
-    }
-
     public void setCryptoServiceName(String cryptoServiceName) {
       this.cryptoServiceName = cryptoServiceName;
     }
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 42077cc..1c125da 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -533,16 +533,6 @@ public abstract class FileOperations {
       return this;
     }
 
-    /** The range over which this reader should scan. */
-    public Range getRange() {
-      return range;
-    }
-
-    /** The column families which this reader should scan. */
-    public Set<ByteSequence> getColumnFamilies() {
-      return columnFamilies;
-    }
-
     /** Execute the operation, constructing a scan iterator. */
     public FileSKVIterator build() throws IOException {
       return openScanReader(toScanReaderBuilderOptions(range, columnFamilies, inclusive));
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/ClassSize.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/ClassSize.java
index f7ceb21..1c783b8 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/ClassSize.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/ClassSize.java
@@ -20,9 +20,6 @@ package org.apache.accumulo.core.file.blockfile.cache.impl;
 
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 /**
  * Class for determining the "size" of a class, an attempt to calculate the actual bytes that an
  * object of this class will occupy in memory
@@ -30,23 +27,13 @@ import org.apache.commons.logging.LogFactory;
  * The core of this class is taken from the Derby project
  */
 public class ClassSize {
-  static final Log LOG = LogFactory.getLog(ClassSize.class);
 
   /** Array overhead */
   public static final int ARRAY;
 
-  /** Overhead for ArrayList(0) */
-  public static final int ARRAYLIST;
-
   /** Overhead for ByteBuffer */
   public static final int BYTE_BUFFER;
 
-  /** Overhead for an Integer */
-  public static final int INTEGER;
-
-  /** Overhead for entry in map */
-  public static final int MAP_ENTRY;
-
   /** Object overhead is minimum 2 * reference size (8 bytes on 64-bit) */
   public static final int OBJECT;
 
@@ -56,9 +43,6 @@ public class ClassSize {
   /** String overhead */
   public static final int STRING;
 
-  /** Overhead for TreeMap */
-  public static final int TREEMAP;
-
   /** Overhead for ConcurrentHashMap */
   public static final int CONCURRENT_HASHMAP;
 
@@ -68,30 +52,9 @@ public class ClassSize {
   /** Overhead for ConcurrentHashMap.Segment */
   public static final int CONCURRENT_HASHMAP_SEGMENT;
 
-  /** Overhead for ConcurrentSkipListMap */
-  public static final int CONCURRENT_SKIPLISTMAP;
-
-  /** Overhead for ConcurrentSkipListMap Entry */
-  public static final int CONCURRENT_SKIPLISTMAP_ENTRY;
-
-  /** Overhead for ReentrantReadWriteLock */
-  public static final int REENTRANT_LOCK;
-
-  /** Overhead for AtomicLong */
-  public static final int ATOMIC_LONG;
-
   /** Overhead for AtomicInteger */
   public static final int ATOMIC_INTEGER;
 
-  /** Overhead for AtomicBoolean */
-  public static final int ATOMIC_BOOLEAN;
-
-  /** Overhead for CopyOnWriteArraySet */
-  public static final int COPYONWRITE_ARRAYSET;
-
-  /** Overhead for CopyOnWriteArrayList */
-  public static final int COPYONWRITE_ARRAYLIST;
-
   private static final String THIRTY_TWO = "32";
 
   /**
@@ -110,17 +73,9 @@ public class ClassSize {
 
     ARRAY = 3 * REFERENCE;
 
-    ARRAYLIST = align(OBJECT + align(REFERENCE) + align(ARRAY) + (2 * SizeConstants.SIZEOF_INT));
-
     BYTE_BUFFER = align(OBJECT + align(REFERENCE) + align(ARRAY) + (5 * SizeConstants.SIZEOF_INT)
         + (3 * SizeConstants.SIZEOF_BOOLEAN) + SizeConstants.SIZEOF_LONG);
 
-    INTEGER = align(OBJECT + SizeConstants.SIZEOF_INT);
-
-    MAP_ENTRY = align(OBJECT + 5 * REFERENCE + SizeConstants.SIZEOF_BOOLEAN);
-
-    TREEMAP = align(OBJECT + (2 * SizeConstants.SIZEOF_INT) + align(7 * REFERENCE));
-
     STRING = align(OBJECT + ARRAY + REFERENCE + 3 * SizeConstants.SIZEOF_INT);
 
     CONCURRENT_HASHMAP = align((2 * SizeConstants.SIZEOF_INT) + ARRAY + (6 * REFERENCE) + OBJECT);
@@ -131,22 +86,7 @@ public class ClassSize {
     CONCURRENT_HASHMAP_SEGMENT = align(
         REFERENCE + OBJECT + (3 * SizeConstants.SIZEOF_INT) + SizeConstants.SIZEOF_FLOAT + ARRAY);
 
-    CONCURRENT_SKIPLISTMAP = align(SizeConstants.SIZEOF_INT + OBJECT + (8 * REFERENCE));
-
-    CONCURRENT_SKIPLISTMAP_ENTRY = align(align(OBJECT + (3 * REFERENCE)) + /* one node per entry */
-        align((OBJECT + (3 * REFERENCE)) / 2)); /* one index per two entries */
-
-    REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE));
-
-    ATOMIC_LONG = align(OBJECT + SizeConstants.SIZEOF_LONG);
-
     ATOMIC_INTEGER = align(OBJECT + SizeConstants.SIZEOF_INT);
-
-    ATOMIC_BOOLEAN = align(OBJECT + SizeConstants.SIZEOF_BOOLEAN);
-
-    COPYONWRITE_ARRAYSET = align(OBJECT + REFERENCE);
-
-    COPYONWRITE_ARRAYLIST = align(OBJECT + (2 * REFERENCE) + ARRAY);
   }
 
   /**
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/SizeConstants.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/SizeConstants.java
index aef769d..0864f32 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/SizeConstants.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/cache/impl/SizeConstants.java
@@ -21,21 +21,6 @@ public class SizeConstants {
   public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE;
 
   /**
-   * Size of byte in bytes
-   */
-  public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN;
-
-  /**
-   * Size of char in bytes
-   */
-  public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE;
-
-  /**
-   * Size of double in bytes
-   */
-  public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE;
-
-  /**
    * Size of float in bytes
    */
   public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE;
@@ -50,9 +35,4 @@ public class SizeConstants {
    */
   public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE;
 
-  /**
-   * Size of short in bytes
-   */
-  public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE;
-
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
index f0f5aca..97deb4a 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
@@ -28,7 +28,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Supplier;
 
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
 import org.apache.accumulo.core.file.rfile.bcfile.BCFile.Reader.BlockReader;
 import org.apache.accumulo.core.file.rfile.bcfile.MetaBlockDoesNotExist;
@@ -80,7 +79,6 @@ public class CachableBlockFile {
     private volatile InputStream fin = null;
     private boolean closed = false;
     private final Configuration conf;
-    private final AccumuloConfiguration accumuloConfiguration;
     private final CryptoService cryptoService;
 
     private final IoeSupplier<InputStream> inputSupplier;
@@ -304,7 +302,7 @@ public class CachableBlockFile {
     private Reader(String cacheId, IoeSupplier<InputStream> inputSupplier,
         IoeSupplier<Long> lenghtSupplier, Cache<String,Long> fileLenCache, BlockCache data,
         BlockCache index, RateLimiter readLimiter, Configuration conf,
-        AccumuloConfiguration accumuloConfiguration, CryptoService cryptoService) {
+        CryptoService cryptoService) {
       Preconditions.checkArgument(cacheId != null || (data == null && index == null));
       this.cacheId = cacheId;
       this.inputSupplier = inputSupplier;
@@ -314,36 +312,30 @@ public class CachableBlockFile {
       this._iCache = index;
       this.readLimiter = readLimiter;
       this.conf = conf;
-      this.accumuloConfiguration = accumuloConfiguration;
       this.cryptoService = Objects.requireNonNull(cryptoService);
     }
 
     public Reader(FileSystem fs, Path dataFile, Configuration conf, BlockCache data,
-        BlockCache index, AccumuloConfiguration accumuloConfiguration,
-        CryptoService cryptoService) {
-      this(fs, dataFile, conf, null, data, index, null, accumuloConfiguration, cryptoService);
+        BlockCache index, CryptoService cryptoService) {
+      this(fs, dataFile, conf, null, data, index, null, cryptoService);
     }
 
     public Reader(FileSystem fs, Path dataFile, Configuration conf, Cache<String,Long> fileLenCache,
-        BlockCache data, BlockCache index, RateLimiter readLimiter,
-        AccumuloConfiguration accumuloConfiguration, CryptoService cryptoService) {
+        BlockCache data, BlockCache index, RateLimiter readLimiter, CryptoService cryptoService) {
       this(pathToCacheId(dataFile), () -> fs.open(dataFile),
           () -> fs.getFileStatus(dataFile).getLen(), fileLenCache, data, index, readLimiter, conf,
-          accumuloConfiguration, cryptoService);
+          cryptoService);
     }
 
     public <InputStreamType extends InputStream & Seekable> Reader(String cacheId,
         InputStreamType fsin, long len, Configuration conf, BlockCache data, BlockCache index,
-        AccumuloConfiguration accumuloConfiguration, CryptoService cryptoService) {
-      this(cacheId, () -> fsin, () -> len, null, data, index, null, conf, accumuloConfiguration,
-          cryptoService);
+        CryptoService cryptoService) {
+      this(cacheId, () -> fsin, () -> len, null, data, index, null, conf, cryptoService);
     }
 
     public <InputStreamType extends InputStream & Seekable> Reader(InputStreamType fsin, long len,
-        Configuration conf, AccumuloConfiguration accumuloConfiguration,
-        CryptoService cryptoService) {
-      this(null, () -> fsin, () -> len, null, null, null, null, conf, accumuloConfiguration,
-          cryptoService);
+        Configuration conf, CryptoService cryptoService) {
+      this(null, () -> fsin, () -> len, null, null, null, null, conf, cryptoService);
     }
 
     /**
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index 36c5f6b..7278cb8 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -179,7 +179,7 @@ public class PrintInfo implements KeywordExecutable {
       printCryptoParams(path, fs);
 
       CachableBlockFile.Reader _rdr = new CachableBlockFile.Reader(fs, path, conf, null, null,
-          siteConfig, CryptoServiceFactory.newInstance(siteConfig, ClassloaderType.ACCUMULO));
+          CryptoServiceFactory.newInstance(siteConfig, ClassloaderType.ACCUMULO));
       Reader iter = new RFile.Reader(_rdr);
       MetricsGatherer<Map<String,ArrayList<VisibilityMetric>>> vmg = new VisMetricsGatherer();
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
index 96aa67d..0be4fa6 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
@@ -48,7 +48,7 @@ public class RFileOperations extends FileOperations {
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(options.getFileSystem(),
         new Path(options.getFilename()), options.getConfiguration(), options.getFileLenCache(),
         options.getDataCache(), options.getIndexCache(), options.getRateLimiter(),
-        options.getTableConfiguration(), options.getCryptoService());
+        options.getCryptoService());
     return new RFile.Reader(_cbr);
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/SplitLarge.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/SplitLarge.java
index deba52c..69af66a 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/SplitLarge.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/SplitLarge.java
@@ -66,7 +66,7 @@ public class SplitLarge {
       CryptoService cryptoService = ConfigurationTypeHelper.getClassInstance(null, opts.cryptoClass,
           CryptoService.class, CryptoServiceFactory.newDefaultInstance());
       Path path = new Path(file);
-      CachableBlockFile.Reader rdr = new CachableBlockFile.Reader(fs, path, conf, null, null, aconf,
+      CachableBlockFile.Reader rdr = new CachableBlockFile.Reader(fs, path, conf, null, null,
           cryptoService);
       try (Reader iter = new RFile.Reader(rdr)) {
 
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java b/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
index 39dea3b..c30d9fe 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/AccumuloReplicationReplayer.java
@@ -26,7 +26,4 @@ public interface AccumuloReplicationReplayer {
 
   long replicateLog(ClientContext context, String tableName, WalEdits data)
       throws RemoteReplicationException, AccumuloException, AccumuloSecurityException;
-
-  long replicateKeyValues() throws RemoteReplicationException;
-
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
index 21fa902..71d941c 100644
--- a/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
+++ b/core/src/main/java/org/apache/accumulo/core/replication/ReplicationSchema.java
@@ -61,10 +61,6 @@ public class ReplicationSchema {
       _getFile(k, buff);
     }
 
-    public static ReplicationTarget getTarget(Key k) {
-      return getTarget(k, new Text());
-    }
-
     public static ReplicationTarget getTarget(Key k, Text buff) {
       checkArgument(BYTE_SEQ_NAME.equals(k.getColumnFamilyData()),
           "Given replication work key with incorrect colfam");
@@ -160,19 +156,6 @@ public class ReplicationSchema {
     private static final ULongLexicoder longEncoder = new ULongLexicoder();
 
     /**
-     * Extract the table ID from the given key (inefficiently if called repeatedly)
-     *
-     * @param k
-     *          OrderSection Key
-     * @return source table id
-     */
-    public static String getTableId(Key k) {
-      Text buff = new Text();
-      getTableId(k, buff);
-      return buff.toString();
-    }
-
-    /**
      * Extract the table ID from the given key
      *
      * @param k
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
index ed71059..5730e74 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
@@ -179,7 +179,7 @@ public class SummaryReader {
       SummarizerFactory factory, CryptoService cryptoService) throws IOException {
     // @formatter:off
     org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.Reader bcReader =
-      new CachableBlockFile.Reader((InputStream & Seekable) inputStream, length, conf, aConf,
+      new CachableBlockFile.Reader((InputStream & Seekable) inputStream, length, conf,
               cryptoService);
     // @formatter:on
     return load(bcReader, summarySelector, factory);
@@ -196,7 +196,7 @@ public class SummaryReader {
       // only summary data is wanted.
       CompositeCache compositeCache = new CompositeCache(summaryCache, indexCache);
       bcReader = new CachableBlockFile.Reader(fs, file, conf, fileLenCache, null, compositeCache,
-          null, aConf, cryptoService);
+          null, cryptoService);
       return load(bcReader, summarySelector, factory);
     } catch (FileNotFoundException fne) {
       SummaryReader sr = new SummaryReader();
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
index c776de8..134c185 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiLevelIndexTest.java
@@ -83,7 +83,7 @@ public class MultiLevelIndexTest {
     SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
     FSDataInputStream in = new FSDataInputStream(bais);
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length,
-        CachedConfiguration.getInstance(), aconf,
+        CachedConfiguration.getInstance(),
         CryptoServiceFactory.newInstance(aconf, ClassloaderType.JAVA));
 
     Reader reader = new Reader(_cbr, RFile.RINDEX_VER_8);
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
index 7cca7b1..9eca5f3 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/MultiThreadedRFileTest.java
@@ -187,7 +187,7 @@ public class MultiThreadedRFileTest {
 
       // the caches used to obfuscate the multithreaded issues
       CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(fs, path, conf, null, null,
-          defaultConf, CryptoServiceFactory.newInstance(defaultConf, ClassloaderType.JAVA));
+          CryptoServiceFactory.newInstance(defaultConf, ClassloaderType.JAVA));
       reader = new RFile.Reader(_cbr);
       iter = new ColumnFamilySkippingIterator(reader);
 
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index e955239..82affa5 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -305,7 +305,7 @@ public class RFileTest {
       LruBlockCache dataCache = (LruBlockCache) manager.getBlockCache(CacheType.DATA);
 
       CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader("source-1", in, fileLength, conf,
-          dataCache, indexCache, accumuloConfiguration,
+          dataCache, indexCache,
           CryptoServiceFactory.newInstance(accumuloConfiguration, ClassloaderType.JAVA));
       reader = new RFile.Reader(_cbr);
       if (cfsi)
@@ -1736,7 +1736,7 @@ public class RFileTest {
     SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
     FSDataInputStream in2 = new FSDataInputStream(bais);
     CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in2, data.length,
-        CachedConfiguration.getInstance(), aconf,
+        CachedConfiguration.getInstance(),
         CryptoServiceFactory.newInstance(aconf, ClassloaderType.JAVA));
     Reader reader = new RFile.Reader(_cbr);
     checkIndex(reader);
diff --git a/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapred/AccumuloOutputFormatImpl.java b/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapred/AccumuloOutputFormatImpl.java
index 7b7cbed..0823a03 100644
--- a/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapred/AccumuloOutputFormatImpl.java
+++ b/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapred/AccumuloOutputFormatImpl.java
@@ -91,19 +91,6 @@ public class AccumuloOutputFormatImpl {
   }
 
   /**
-   * Set Accumulo client properties file used to connect to Accumulo
-   *
-   * @param job
-   *          Hadoop job to be configured
-   * @param clientPropsFile
-   *          URL (hdfs:// or http://) to Accumulo client properties file
-   * @since 2.0.0
-   */
-  protected static void setClientPropertiesFile(JobConf job, String clientPropsFile) {
-    OutputConfigurator.setClientPropertiesFile(CLASS, job, clientPropsFile);
-  }
-
-  /**
    * Sets the default table name to use if one emits a null in place of a table name for a given
    * mutation. Table names can only be alpha-numeric and underscores.
    *
diff --git a/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapreduce/AccumuloOutputFormatImpl.java b/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapreduce/AccumuloOutputFormatImpl.java
index 80a292b..c1aa333 100644
--- a/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapreduce/AccumuloOutputFormatImpl.java
+++ b/hadoop-mapreduce/src/main/java/org/apache/accumulo/hadoopImpl/mapreduce/AccumuloOutputFormatImpl.java
@@ -93,19 +93,6 @@ public class AccumuloOutputFormatImpl {
   }
 
   /**
-   * Set Accumulo client properties file used to connect to Accumulo
-   *
-   * @param job
-   *          Hadoop job to be configured
-   * @param clientPropsFile
-   *          URL to Accumulo client properties file
-   * @since 2.0.0
-   */
-  protected static void setClientPropertiesFile(Job job, String clientPropsFile) {
-    OutputConfigurator.setClientPropertiesFile(CLASS, job.getConfiguration(), clientPropsFile);
-  }
-
-  /**
    * Sets the default table name to use if one emits a null in place of a table name for a given
    * mutation. Table names can only be alpha-numeric and underscores.
    *
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
index 9a53939..11b6b4b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -156,10 +156,6 @@ public class ServerConstants {
     return VolumeConfiguration.prefix(getBaseUris(conf), RECOVERY_DIR);
   }
 
-  public static String[] getWalDirs(AccumuloConfiguration conf) {
-    return VolumeConfiguration.prefix(getBaseUris(conf), WAL_DIR);
-  }
-
   public static Path getInstanceIdLocation(Volume v) {
     // all base dirs should have the same instance id, so can choose any one
     return v.prefixChild(INSTANCE_ID_DIR);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 22541c5..23b0c6c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -123,16 +123,6 @@ public class ServerContext extends ClientContext {
     DistributedTrace.disable();
   }
 
-  public String getApplicationName() {
-    Objects.requireNonNull(applicationName);
-    return applicationName;
-  }
-
-  public String getApplicationClassName() {
-    Objects.requireNonNull(applicationClassName);
-    return applicationName;
-  }
-
   public String getHostname() {
     Objects.requireNonNull(hostname);
     return hostname;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
index 717c9d6..7ddee3c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerInfo.java
@@ -111,10 +111,6 @@ public class ServerInfo implements ClientInfo {
     return instanceID;
   }
 
-  public String getZooKeeperRoot() {
-    return zooKeeperRoot;
-  }
-
   @Override
   public String getZooKeepers() {
     return zooKeepers;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java b/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java
index 2659ce0..7120804 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/TabletLevel.java
@@ -16,17 +16,7 @@
  */
 package org.apache.accumulo.server;
 
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-
 public enum TabletLevel {
   ROOT, META, NORMAL;
 
-  public static TabletLevel getLevel(KeyExtent extent) {
-    if (!extent.isMeta())
-      return NORMAL;
-    if (extent.isRootTablet())
-      return ROOT;
-    return META;
-  }
-
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 3ed9edb..9246645 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -56,7 +56,6 @@ import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.rpc.ThriftUtil;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.StopWatch;
@@ -65,7 +64,6 @@ import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.util.FileUtil;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -115,7 +113,6 @@ public class BulkImporter {
     timer = new StopWatch<>(Timers.class);
     timer.start(Timers.TOTAL);
 
-    Configuration conf = CachedConfiguration.getInstance();
     VolumeManagerImpl.get(context.getConfiguration());
     final VolumeManager fs = VolumeManagerImpl.get(context.getConfiguration());
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java
deleted file mode 100644
index 0746d03..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/TableParentConfiguration.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.conf;
-
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.clientImpl.Namespace;
-import org.apache.accumulo.core.clientImpl.Table;
-import org.apache.accumulo.core.clientImpl.Tables;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.server.ServerContext;
-
-/**
- * Used by TableConfiguration to dynamically get the NamespaceConfiguration if the namespace changes
- */
-public class TableParentConfiguration extends NamespaceConfiguration {
-
-  private Table.ID tableId;
-
-  public TableParentConfiguration(Table.ID tableId, ServerContext context,
-      AccumuloConfiguration parent) {
-    super(null, context, parent);
-    this.tableId = tableId;
-    this.namespaceId = getNamespaceId();
-  }
-
-  @Override
-  protected Namespace.ID getNamespaceId() {
-    try {
-      return Tables.getNamespaceId(context, tableId);
-    } catch (TableNotFoundException e) {
-      throw new RuntimeException(e);
-    }
-  }
-}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
index a45b34d..d157e85 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfiguration.java
@@ -58,15 +58,6 @@ public class ZooConfiguration extends AccumuloConfiguration {
       propCache.clear();
   }
 
-  /**
-   * Gets the parent configuration of this configuration.
-   *
-   * @return parent configuration
-   */
-  public AccumuloConfiguration getParentConfiguration() {
-    return parent;
-  }
-
   private String _get(Property property) {
     String key = property.getKey();
     String value = null;
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
index 715b7c7..5953abc 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ZooConfigurationFactory.java
@@ -68,17 +68,4 @@ class ZooConfigurationFactory {
     return config;
   }
 
-  /**
-   * Gets a configuration object for the given instance with the given parent. Repeated calls will
-   * return the same object.
-   *
-   * @param context
-   *          ServerContext; if null, instance ID is determined from HDFS
-   * @param parent
-   *          parent configuration (required)
-   * @return configuration
-   */
-  public ZooConfiguration getInstance(ServerContext context, AccumuloConfiguration parent) {
-    return getInstance(context, new ZooCacheFactory(), parent);
-  }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java b/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
deleted file mode 100644
index 043bf15..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/master/recovery/MapRLogCloser.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.master.recovery;
-
-import java.io.IOException;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class MapRLogCloser implements LogCloser {
-
-  private static final Logger log = LoggerFactory.getLogger(MapRLogCloser.class);
-
-  @Override
-  public long close(AccumuloConfiguration conf, VolumeManager fs, Path path) {
-    log.info("Recovering file {} by changing permission to readonly", path);
-    FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
-    FsPermission roPerm = new FsPermission((short) 0444);
-    try {
-      ns.setPermission(path, roPerm);
-      return 0;
-    } catch (IOException ex) {
-      log.error("error recovering lease ", ex);
-      // lets do this again
-      return 1000;
-    }
-  }
-
-}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
index 3f2ee7c..094b275 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/metrics/MetricsConfiguration.java
@@ -23,7 +23,6 @@ import org.apache.commons.configuration.AbstractFileConfiguration;
 import org.apache.commons.configuration.Configuration;
 import org.apache.commons.configuration.ConfigurationException;
 import org.apache.commons.configuration.EnvironmentConfiguration;
-import org.apache.commons.configuration.SystemConfiguration;
 import org.apache.commons.configuration.XMLConfiguration;
 import org.apache.commons.configuration.event.ConfigurationEvent;
 import org.apache.commons.configuration.event.ConfigurationListener;
@@ -41,8 +40,6 @@ public class MetricsConfiguration {
 
   private int notFoundCount = 0;
 
-  private static SystemConfiguration sysConfig = null;
-
   private static EnvironmentConfiguration envConfig = null;
 
   private XMLConfiguration xConfig = null;
@@ -115,14 +112,6 @@ public class MetricsConfiguration {
     }
   }
 
-  public Configuration getSystemConfiguration() {
-    synchronized (MetricsConfiguration.class) {
-      if (sysConfig == null)
-        sysConfig = new SystemConfiguration();
-      return sysConfig;
-    }
-  }
-
   public Configuration getMetricsConfiguration() {
     if (notFound) {
       if (notFoundCount <= CONFIG_FILE_CHECK_COUNTER) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java b/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java
deleted file mode 100644
index 4847eae..0000000
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/PrintReplicationRecords.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.replication;
-
-import java.io.PrintStream;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-
-public class PrintReplicationRecords implements Runnable {
-  private static final Logger log = LoggerFactory.getLogger(PrintReplicationRecords.class);
-
-  private AccumuloClient client;
-  private PrintStream out;
-  private SimpleDateFormat sdf;
-
-  public PrintReplicationRecords(AccumuloClient client, PrintStream out) {
-    this.client = client;
-    this.out = out;
-    this.sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
-  }
-
-  @Override
-  public void run() {
-    Scanner s;
-
-    out.println(sdf.format(new Date()) + " Replication entries from metadata table");
-    out.println("------------------------------------------------------------------");
-    try {
-      s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    } catch (TableNotFoundException e) {
-      log.error("Metadata table does not exist");
-      return;
-    }
-
-    s.setRange(ReplicationSection.getRange());
-    s.fetchColumnFamily(ReplicationSection.COLF);
-    for (Entry<Key,Value> entry : s) {
-      try {
-        out.println(entry.getKey().toStringNoTruncate() + "="
-            + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
-      } catch (InvalidProtocolBufferException e) {
-        out.println(entry.getKey().toStringNoTruncate() + "= Could not deserialize Status message");
-      }
-    }
-
-    out.println();
-    out.println(sdf.format(new Date()) + " Replication entries from replication table");
-    out.println("--------------------------------------------------------------------");
-
-    try {
-      s = client.createScanner(ReplicationTable.NAME, Authorizations.EMPTY);
-    } catch (TableNotFoundException e) {
-      log.error("Replication table does not exist");
-      return;
-    }
-
-    for (Entry<Key,Value> entry : s) {
-      try {
-        out.println(entry.getKey().toStringNoTruncate() + "="
-            + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
-      } catch (InvalidProtocolBufferException e) {
-        out.println(entry.getKey().toStringNoTruncate() + "= Could not deserialize Status message");
-      }
-    }
-  }
-}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
index cdc081f..da7993e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicationUtil.java
@@ -16,19 +16,14 @@
  */
 package org.apache.accumulo.server.replication;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.NoSuchElementException;
 import java.util.Set;
 
-import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.clientImpl.Table;
 import org.apache.accumulo.core.clientImpl.Tables;
@@ -42,21 +37,16 @@ import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
 import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
 import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.replication.ReplicationTarget;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Iterables;
-import com.google.protobuf.InvalidProtocolBufferException;
-
 public class ReplicationUtil {
   private static final Logger log = LoggerFactory.getLogger(ReplicationUtil.class);
   public static final String STATUS_FORMATTER_CLASS_NAME = StatusFormatter.class.getName();
@@ -220,91 +210,4 @@ public class ReplicationUtil {
     return paths;
   }
 
-  /**
-   * Fetches the absolute path of the file to be replicated.
-   *
-   * @param workQueuePath
-   *          Root path for the Replication WorkQueue
-   * @param queueKey
-   *          The Replication work queue key
-   * @return The absolute path for the file, or null if the key is no longer in ZooKeeper
-   */
-  public String getAbsolutePath(String workQueuePath, String queueKey) {
-    byte[] data = zooCache.get(workQueuePath + "/" + queueKey);
-    if (data != null) {
-      return new String(data, UTF_8);
-    }
-
-    return null;
-  }
-
-  /**
-   * Compute a progress string for the replication of the given WAL
-   *
-   * @param client
-   *          Accumulo Client
-   * @param path
-   *          Absolute path to a WAL, or null
-   * @param target
-   *          ReplicationTarget the WAL is being replicated to
-   * @return A status message for a file being replicated
-   */
-  public String getProgress(AccumuloClient client, String path, ReplicationTarget target) {
-    // We could try to grep over the table, but without knowing the full file path, we
-    // can't find the status quickly
-    String status = "Unknown";
-    if (path != null) {
-      Scanner s;
-      try {
-        s = ReplicationTable.getScanner(client);
-      } catch (ReplicationTableOfflineException e) {
-        log.debug("Replication table no longer online", e);
-        return status;
-      }
-
-      s.setRange(Range.exact(path));
-      s.fetchColumn(WorkSection.NAME, target.toText());
-
-      // Fetch the work entry for this item
-      Entry<Key,Value> kv = null;
-      try {
-        kv = Iterables.getOnlyElement(s);
-      } catch (NoSuchElementException e) {
-        log.trace("Could not find status of {} replicating to {}", path, target);
-        status = "Unknown";
-      } finally {
-        s.close();
-      }
-
-      // If we found the work entry for it, try to compute some progress
-      if (kv != null) {
-        try {
-          Status stat = Status.parseFrom(kv.getValue().get());
-          if (StatusUtil.isFullyReplicated(stat)) {
-            status = "Finished";
-          } else {
-            if (stat.getInfiniteEnd()) {
-              status = stat.getBegin() + "/&infin; records";
-            } else {
-              status = stat.getBegin() + "/" + stat.getEnd() + " records";
-            }
-          }
-        } catch (InvalidProtocolBufferException e) {
-          log.warn("Could not deserialize protobuf for {}", kv.getKey(), e);
-          status = "Unknown";
-        }
-      }
-    }
-
-    return status;
-  }
-
-  public Map<String,String> invert(Map<String,String> map) {
-    Map<String,String> newMap = new HashMap<>(map.size());
-    for (Entry<String,String> entry : map.entrySet()) {
-      newMap.put(entry.getValue(), entry.getKey());
-    }
-    return newMap;
-  }
-
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationKey.java b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationKey.java
index 9bc1bae..ebeeac3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationKey.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/security/delegation/AuthenticationKey.java
@@ -66,29 +66,15 @@ public class AuthenticationKey implements Writable {
     return authKey.getCreationDate();
   }
 
-  public void setCreationDate(long creationDate) {
-    requireNonNull(authKey);
-    authKey.setCreationDate(creationDate);
-  }
-
   public long getExpirationDate() {
     requireNonNull(authKey);
     return authKey.getExpirationDate();
   }
 
-  public void setExpirationDate(long expirationDate) {
-    requireNonNull(authKey);
-    authKey.setExpirationDate(expirationDate);
-  }
-
   SecretKey getKey() {
     return secret;
   }
 
-  void setKey(SecretKey secret) {
-    this.secret = secret;
-  }
-
   @Override
   public int hashCode() {
     if (authKey == null) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java b/server/base/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
index 44cb5f0..248bb08 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
@@ -34,7 +34,6 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.server.cli.ServerUtilOpts;
-import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.hadoop.io.Text;
 
 public class CheckForMetadataProblems {
@@ -168,8 +167,6 @@ public class CheckForMetadataProblems {
     ServerUtilOpts opts = new ServerUtilOpts();
     opts.parseArgs(CheckForMetadataProblems.class.getName(), args);
 
-    VolumeManager fs = opts.getServerContext().getVolumeManager();
-
     checkMetadataAndRootTableEntries(RootTable.NAME, opts);
     checkMetadataAndRootTableEntries(MetadataTable.NAME, opts);
     opts.stopTracing();
diff --git a/server/gc/pom.xml b/server/gc/pom.xml
index 502be73..1229b3d 100644
--- a/server/gc/pom.xml
+++ b/server/gc/pom.xml
@@ -65,10 +65,6 @@
       <artifactId>htrace-core</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.thrift</groupId>
-      <artifactId>libthrift</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
     </dependency>
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
index 97d2af7..f699203 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/replication/CloseWriteAheadLogReferences.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.gc.replication;
 
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map.Entry;
 import java.util.Set;
 
@@ -33,18 +32,13 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
 import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.rpc.ThriftUtil;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.trace.Span;
 import org.apache.accumulo.core.trace.Trace;
-import org.apache.accumulo.core.trace.thrift.TInfo;
-import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.log.WalStateManager;
 import org.apache.accumulo.server.log.WalStateManager.WalMarkerException;
@@ -53,7 +47,6 @@ import org.apache.accumulo.server.replication.StatusUtil;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
-import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -221,74 +214,4 @@ public class CloseWriteAheadLogReferences implements Runnable {
     bw.addMutation(m);
   }
 
-  private HostAndPort getMasterAddress() {
-    try {
-      List<String> locations = context.getMasterLocations();
-      if (locations.size() == 0)
-        return null;
-      return HostAndPort.fromString(locations.get(0));
-    } catch (Exception e) {
-      log.warn("Failed to obtain master host", e);
-    }
-
-    return null;
-  }
-
-  private MasterClientService.Client getMasterConnection() {
-    final HostAndPort address = getMasterAddress();
-    try {
-      if (address == null) {
-        log.warn("Could not fetch Master address");
-        return null;
-      }
-      return ThriftUtil.getClient(new MasterClientService.Client.Factory(), address, context);
-    } catch (Exception e) {
-      log.warn("Issue with masterConnection (" + address + ") " + e, e);
-    }
-    return null;
-  }
-
-  /**
-   * Get the active tabletservers as seen by the master.
-   *
-   * @return The active tabletservers, null if they can't be computed.
-   */
-  protected List<String> getActiveTservers(TInfo tinfo) {
-    MasterClientService.Client client = null;
-
-    List<String> tservers = null;
-    try {
-      client = getMasterConnection();
-
-      // Could do this through InstanceOperations, but that would set a bunch of new Watchers via ZK
-      // on every tserver
-      // node. The master is already tracking all of this info, so hopefully this is less overall
-      // work.
-      if (client != null) {
-        tservers = client.getActiveTservers(tinfo, context.rpcCreds());
-      }
-    } catch (TException e) {
-      // If we can't fetch the tabletservers, we can't fetch any active WALs
-      log.warn("Failed to fetch active tabletservers from the master", e);
-      return null;
-    } finally {
-      ThriftUtil.returnClient(client);
-    }
-
-    return tservers;
-  }
-
-  protected List<String> getActiveWalsForServer(TInfo tinfo, HostAndPort server) {
-    TabletClientService.Client tserverClient = null;
-    try {
-      tserverClient = ThriftUtil.getClient(new TabletClientService.Client.Factory(), server,
-          context);
-      return tserverClient.getActiveLogs(tinfo, context.rpcCreds());
-    } catch (TException e) {
-      log.warn("Failed to fetch active write-ahead logs from " + server, e);
-      return null;
-    } finally {
-      ThriftUtil.returnClient(tserverClient);
-    }
-  }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
index 750b1cf..c1606a6 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/delete/CleanUp.java
@@ -50,7 +50,6 @@ import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -214,31 +213,6 @@ class CleanUp extends MasterRepo {
     return null;
   }
 
-  protected void merge(VolumeManager fs, Path src, Path dest) throws IOException {
-    for (FileStatus child : fs.listStatus(src)) {
-      final String childName = child.getPath().getName();
-      final Path childInSrc = new Path(src, childName), childInDest = new Path(dest, childName);
-
-      if (child.isFile()) {
-        if (fs.exists(childInDest)) {
-          log.warn("File already exists in archive, ignoring. " + childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else if (child.isDirectory()) {
-        if (fs.exists(childInDest)) {
-          // Recurse
-          merge(fs, childInSrc, childInDest);
-        } else {
-          fs.rename(childInSrc, childInDest);
-        }
-      } else {
-        // Symlinks shouldn't exist in table directories..
-        log.warn("Ignoring archiving of non file/directory: " + child);
-      }
-    }
-  }
-
   @Override
   public void undo(long tid, Master environment) {
     // nothing to do
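
The merge() helper deleted above was a generic recursive move-and-merge over VolumeManager. For reference, the same pattern written as a standalone sketch against a plain Hadoop FileSystem (hypothetical class name, no Accumulo types) is:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MergeDirs {
      // Recursively move everything under src into dest; files already present in dest are
      // left alone, directories that exist on both sides are descended into and merged.
      static void merge(FileSystem fs, Path src, Path dest) throws IOException {
        for (FileStatus child : fs.listStatus(src)) {
          Path childInSrc = child.getPath();
          Path childInDest = new Path(dest, childInSrc.getName());
          if (child.isFile()) {
            if (!fs.exists(childInDest)) {
              fs.rename(childInSrc, childInDest);
            }
          } else if (child.isDirectory()) {
            if (fs.exists(childInDest)) {
              merge(fs, childInSrc, childInDest);
            } else {
              fs.rename(childInSrc, childInDest);
            }
          }
          // anything else (e.g. a symlink) is skipped, as the original method did
        }
      }
    }
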
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 77ec84b..ee1d455 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -1509,8 +1509,7 @@ public class TabletServer implements Runnable {
               SecurityErrorCode.BAD_AUTHORIZATIONS);
 
       ConditionalSession cs = new ConditionalSession(credentials,
-          new Authorizations(authorizations), tableId, DurabilityImpl.fromThrift(tdurabilty),
-          classLoaderContext);
+          new Authorizations(authorizations), tableId, DurabilityImpl.fromThrift(tdurabilty));
 
       long sid = sessionManager.createSession(cs, false);
       return new TConditionalSession(sid, lockID, sessionManager.getMaxIdleTime());
@@ -3436,13 +3435,6 @@ public class TabletServer implements Runnable {
 
   // avoid unnecessary redundant markings to meta
   final ConcurrentHashMap<DfsLogger,EnumSet<TabletLevel>> metadataTableLogs = new ConcurrentHashMap<>();
-  final Object levelLocks[] = new Object[TabletLevel.values().length];
-
-  {
-    for (int i = 0; i < levelLocks.length; i++) {
-      levelLocks[i] = new Object();
-    }
-  }
 
   // This is a set of WALs that are closed but may still be referenced by tablets. A LinkedHashSet
 // is used because it's very important to know the order in which WALs were closed when deciding if a
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
index 9aaf214..83f994a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
@@ -53,7 +53,6 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.replication.ReplicationTarget;
-import org.apache.accumulo.core.replication.thrift.KeyValues;
 import org.apache.accumulo.core.replication.thrift.ReplicationServicer;
 import org.apache.accumulo.core.replication.thrift.ReplicationServicer.Client;
 import org.apache.accumulo.core.replication.thrift.WalEdits;
@@ -93,38 +92,10 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
   private AccumuloConfiguration conf;
   private VolumeManager fs;
 
-  protected String getInstanceName() {
-    return instanceName;
-  }
-
-  protected void setInstanceName(String instanceName) {
-    this.instanceName = instanceName;
-  }
-
-  protected String getZookeepers() {
-    return zookeepers;
-  }
-
-  protected void setZookeepers(String zookeepers) {
-    this.zookeepers = zookeepers;
-  }
-
-  protected AccumuloConfiguration getConf() {
-    return conf;
-  }
-
   protected void setConf(AccumuloConfiguration conf) {
     this.conf = conf;
   }
 
-  protected VolumeManager getFs() {
-    return fs;
-  }
-
-  protected void setFs(VolumeManager fs) {
-    this.fs = fs;
-  }
-
   /**
    * Generate the configuration string for this ReplicaSystem
    */
@@ -288,8 +259,7 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
           if (p.getName().endsWith(RFILE_SUFFIX)) {
             span = Trace.start("RFile replication");
             try {
-              finalStatus = replicateRFiles(peerContext, peerTserver, target, p, status, sizeLimit,
-                  remoteTableId, peerContext.rpcCreds(), timeout);
+              finalStatus = replicateRFiles(peerContext, peerTserver, target, p, status, timeout);
             } finally {
               span.stop();
             }
@@ -324,51 +294,44 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
   }
 
   protected Status replicateRFiles(ClientContext peerContext, final HostAndPort peerTserver,
-      final ReplicationTarget target, final Path p, final Status status, final long sizeLimit,
-      final String remoteTableId, final TCredentials tcreds, long timeout)
+      final ReplicationTarget target, final Path p, final Status status, long timeout)
       throws TTransportException, AccumuloException, AccumuloSecurityException {
-    try (final DataInputStream input = getRFileInputStream()) {
-      Status lastStatus = status, currentStatus = status;
-      while (true) {
-        // Read and send a batch of mutations
-        ReplicationStats replResult = ReplicationClient.executeServicerWithReturn(peerContext,
-            peerTserver, new RFileClientExecReturn(target, input, p, currentStatus, sizeLimit,
-                remoteTableId, tcreds),
-            timeout);
 
-        // Catch the overflow
-        long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
-        if (newBegin < 0) {
-          newBegin = Long.MAX_VALUE;
-        }
+    Status lastStatus = status, currentStatus = status;
+    while (true) {
+      // Read and send a batch of mutations
+      ReplicationStats replResult = ReplicationClient.executeServicerWithReturn(peerContext,
+          peerTserver, new RFileClientExecReturn(), timeout);
 
-        currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();
+      // Catch the overflow
+      long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
+      if (newBegin < 0) {
+        newBegin = Long.MAX_VALUE;
+      }
 
-        log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target,
-            ProtobufUtil.toString(currentStatus));
+      currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();
 
-        // If we got a different status
-        if (!currentStatus.equals(lastStatus)) {
-          // If we don't have any more work, just quit
-          if (!StatusUtil.isWorkRequired(currentStatus)) {
-            return currentStatus;
-          } else {
-            // Otherwise, let it loop and replicate some more data
-            lastStatus = currentStatus;
-          }
-        } else {
-          log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target,
-              ProtobufUtil.toString(lastStatus));
+      log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target,
+          ProtobufUtil.toString(currentStatus));
 
-          // otherwise, we didn't actually replicate (likely because there was error sending the
-          // data)
-          // we can just not record any updates, and it will be picked up again by the work assigner
-          return status;
+      // If we got a different status
+      if (!currentStatus.equals(lastStatus)) {
+        // If we don't have any more work, just quit
+        if (!StatusUtil.isWorkRequired(currentStatus)) {
+          return currentStatus;
+        } else {
+          // Otherwise, let it loop and replicate some more data
+          lastStatus = currentStatus;
         }
+      } else {
+        log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target,
+            ProtobufUtil.toString(lastStatus));
+
+        // otherwise, we didn't actually replicate (likely because there was an error sending
+        // the data)
+        // we can just not record any updates, and it will be picked up again by the work assigner
+        return status;
       }
-    } catch (IOException e) {
-      log.error("Could not create input stream from RFile, will retry", e);
-      return status;
     }
   }
 
@@ -591,40 +554,8 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
   protected class RFileClientExecReturn
       implements ClientExecReturn<ReplicationStats,ReplicationServicer.Client> {
 
-    private ReplicationTarget target;
-    private DataInputStream input;
-    private Path p;
-    private Status status;
-    private long sizeLimit;
-    private String remoteTableId;
-    private TCredentials tcreds;
-
-    public RFileClientExecReturn(ReplicationTarget target, DataInputStream input, Path p,
-        Status status, long sizeLimit, String remoteTableId, TCredentials tcreds) {
-      this.target = target;
-      this.input = input;
-      this.p = p;
-      this.status = status;
-      this.sizeLimit = sizeLimit;
-      this.remoteTableId = remoteTableId;
-      this.tcreds = tcreds;
-    }
-
     @Override
-    public ReplicationStats execute(Client client) throws Exception {
-      RFileReplication kvs = getKeyValues();
-      if (kvs.keyValues.getKeyValuesSize() > 0) {
-        long entriesReplicated = client.replicateKeyValues(remoteTableId, kvs.keyValues, tcreds);
-        if (entriesReplicated != kvs.keyValues.getKeyValuesSize()) {
-          log.warn(
-              "Sent {} KeyValue entries for replication but only {} were reported as replicated",
-              kvs.keyValues.getKeyValuesSize(), entriesReplicated);
-        }
-
-        // Not as important to track as WALs because we don't skip any KVs in an RFile
-        return kvs;
-      }
-
+    public ReplicationStats execute(Client client) {
       // No data sent (bytes nor records) and no progress made
       return new ReplicationStats(0L, 0L, 0L);
     }
@@ -690,11 +621,6 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     return new ClientContext(ClientInfo.from(properties, token), localConf);
   }
 
-  protected RFileReplication getKeyValues() {
-    // TODO ACCUMULO-2580 Implement me
-    throw new UnsupportedOperationException();
-  }
-
   protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal,
       Status status) throws IOException {
     Set<Integer> tids = new HashSet<>();
@@ -839,10 +765,6 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     return mutationsToSend;
   }
 
-  protected DataInputStream getRFileInputStream() {
-    throw new UnsupportedOperationException("Not yet implemented");
-  }
-
   public static class ReplicationStats {
     /**
      * The size, in bytes, of the data sent
@@ -883,18 +805,6 @@ public class AccumuloReplicaSystem implements ReplicaSystem {
     }
   }
 
-  public static class RFileReplication extends ReplicationStats {
-    /**
-     * The data to send
-     */
-    public KeyValues keyValues;
-
-    public RFileReplication(KeyValues kvs, long size) {
-      super(size, kvs.keyValues.size(), kvs.keyValues.size());
-      this.keyValues = kvs;
-    }
-  }
-
   /**
    * A "struct" to avoid a nested Entry. Contains the resultant information from collecting data for
    * replication
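
One detail the rewritten replicateRFiles() loop keeps from the old version is the "catch the overflow" step when advancing the Status begin offset. Pulled out as a helper purely for illustration (this method does not exist in the code base), the saturating addition is:

    // Advance the Status "begin" offset by the entries consumed in the last batch,
    // clamping to Long.MAX_VALUE if the addition overflows -- the same inline check
    // that replicateRFiles() performs after each batch.
    static long advanceBegin(long begin, long entriesConsumed) {
      long newBegin = begin + entriesConsumed;
      return newBegin < 0 ? Long.MAX_VALUE : newBegin;
    }
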
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
index 7892b36..424c321 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/BatchWriterReplicationReplayer.java
@@ -168,10 +168,4 @@ public class BatchWriterReplicationReplayer implements AccumuloReplicationReplay
     return mutationsApplied;
   }
 
-  @Override
-  public long replicateKeyValues() {
-    // TODO Implement me
-    throw new UnsupportedOperationException();
-  }
-
 }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
index aa887ca..db53922 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationServicerHandler.java
@@ -48,7 +48,7 @@ public class ReplicationServicerHandler implements Iface {
 
   @Override
   public long replicateLog(String tableIdStr, WalEdits data, TCredentials tcreds)
-      throws RemoteReplicationException, TException {
+      throws TException {
     Table.ID tableId = Table.ID.of(tableIdStr);
     log.debug("Got replication request to tableID {} with {} edits", tableId, data.getEditsSize());
     tabletServer.getSecurityOperation().authenticateUser(tabletServer.getContext().rpcCreds(),
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/session/ConditionalSession.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/session/ConditionalSession.java
index 3694144..4ec3dd8 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/session/ConditionalSession.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/session/ConditionalSession.java
@@ -29,16 +29,14 @@ public class ConditionalSession extends Session {
   public final Table.ID tableId;
   public final AtomicBoolean interruptFlag = new AtomicBoolean();
   public final Durability durability;
-  public final String classLoaderContext;
 
   public ConditionalSession(TCredentials credentials, Authorizations authorizations,
-      Table.ID tableId, Durability durability, String classLoaderContext) {
+      Table.ID tableId, Durability durability) {
     super(credentials);
     this.credentials = credentials;
     this.auths = authorizations;
     this.tableId = tableId;
     this.durability = durability;
-    this.classLoaderContext = classLoaderContext;
   }
 
   @Override
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CommitSession.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CommitSession.java
index 33ff797..1082ade 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CommitSession.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CommitSession.java
@@ -81,10 +81,6 @@ public class CommitSession {
     committer.commit(this, mutations);
   }
 
-  public TabletCommitter getTablet() {
-    return committer;
-  }
-
   public boolean beginUpdatingLogsUsed(DfsLogger copy, boolean mincFinish) {
     return committer.beginUpdatingLogsUsed(memTable, copy, mincFinish);
   }
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
index 3ccc5e3..0c72462 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloReloadingVFSClassLoader.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.start.classloader.vfs;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
@@ -195,10 +194,6 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
     this(uris, vfs, parent, DEFAULT_TIMEOUT, preDelegate);
   }
 
-  public synchronized FileObject[] getFiles() {
-    return Arrays.copyOf(this.files, this.files.length);
-  }
-
   /**
    * Should be ok if this is not called because the thread started by DefaultFileMonitor is a daemon
    * thread
@@ -250,14 +245,6 @@ public class AccumuloReloadingVFSClassLoader implements FileListener, ReloadingC
     this.maxRetries = maxRetries;
   }
 
-  long getMaxRetries() {
-    return maxRetries;
-  }
-
-  long getMaxWaitInterval() {
-    return maxWaitInterval;
-  }
-
   private boolean retryPermitted(long retries) {
     return (maxRetries < 0 || retries < maxRetries);
   }