Posted to notifications@accumulo.apache.org by GitBox <gi...@apache.org> on 2018/08/10 14:31:20 UTC

[GitHub] mikewalch closed pull request #585: ServerContext is now a singleton class

URL: https://github.com/apache/accumulo/pull/585

This is a PR merged from a forked repository. Because GitHub hides the
original diff of a pull request from a fork once it is merged, the diff is
reproduced below for the sake of provenance:
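For readers skimming the change, the heart of the PR is that ServerContext is
now obtained through lazily-initialized, synchronized static getInstance(...)
factory methods instead of public constructors, and formerly standalone
singletons (TableManager, UniqueNameAllocator) are now reached through the
context. Below is a minimal usage sketch based only on the methods visible in
the diff; the wrapper class is hypothetical and the example is illustrative,
not part of the PR itself:

import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.tables.TableManager;
import org.apache.accumulo.server.tablets.UniqueNameAllocator;

public class ServerContextUsageSketch {
  public static void main(String[] args) {
    // First call constructs the process-wide singleton from a new ServerInfo;
    // subsequent calls return the same cached instance.
    ServerContext context = ServerContext.getInstance();

    // Components that previously had their own getInstance() are now created
    // lazily and cached on the context.
    TableManager tableManager = context.getTableManager();
    UniqueNameAllocator nameAllocator = context.getUniqueNameAllocator();

    // Server-side utilities now take the context explicitly, for example:
    // Accumulo.abortIfFateTransactions(context);
  }
}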

diff --git a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
index e352809578..4fe7f62049 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/Accumulo.java
@@ -261,11 +261,10 @@ public static void waitForZookeeperAndHdfs(VolumeManager fs) {
    *
    * see ACCUMULO-2519
    */
-  public static void abortIfFateTransactions() {
+  public static void abortIfFateTransactions(ServerContext context) {
     try {
-      final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<>(
-          new ZooStore<>(ServerContext.getInstance().getZooKeeperRoot() + Constants.ZFATE,
-              ZooReaderWriter.getInstance()));
+      final ReadOnlyTStore<Accumulo> fate = new ReadOnlyStore<>(new ZooStore<>(
+          context.getZooKeeperRoot() + Constants.ZFATE, ZooReaderWriter.getInstance()));
       if (!(fate.list().isEmpty())) {
         throw new AccumuloException("Aborting upgrade because there are"
             + " outstanding FATE transactions from a previous Accumulo version."
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 0111794d34..a2428d964c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -41,6 +41,8 @@
 import org.apache.accumulo.server.rpc.ThriftServerType;
 import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.server.security.delegation.AuthenticationTokenSecretManager;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,13 +58,19 @@
   private static ServerContext serverContextInstance = null;
 
   private final ServerInfo info;
+  private TableManager tableManager;
+  private UniqueNameAllocator nameAllocator;
   private ServerConfigurationFactory serverConfFactory = null;
   private String applicationName = null;
   private String applicationClassName = null;
   private String hostname = null;
   private AuthenticationTokenSecretManager secretManager;
 
-  public ServerContext(ServerInfo info) {
+  private ServerContext() {
+    this(new ServerInfo());
+  }
+
+  private ServerContext(ServerInfo info) {
     super(info, SiteConfiguration.getInstance());
     this.info = info;
   }
@@ -75,13 +83,24 @@ public ServerContext(ClientInfo info) {
     this(new ServerInfo(info));
   }
 
-  public ServerContext(ClientContext context) {
-    this(new ServerInfo(context.getClientInfo()));
+  synchronized public static ServerContext getInstance() {
+    if (serverContextInstance == null) {
+      serverContextInstance = new ServerContext();
+    }
+    return serverContextInstance;
   }
 
-  synchronized public static ServerContext getInstance() {
+  synchronized public static ServerContext getInstance(ClientInfo info) {
+    if (serverContextInstance == null) {
+      serverContextInstance = new ServerContext(info);
+    }
+    return serverContextInstance;
+  }
+
+  synchronized public static ServerContext getInstance(String instanceName, String zooKeepers,
+      int zooKeepersSessionTimeOut) {
     if (serverContextInstance == null) {
-      serverContextInstance = new ServerContext(new ServerInfo());
+      serverContextInstance = new ServerContext(instanceName, zooKeepers, zooKeepersSessionTimeOut);
     }
     return serverContextInstance;
   }
@@ -230,4 +249,18 @@ public Connector getConnector(String principal, AuthenticationToken token)
       throws AccumuloSecurityException, AccumuloException {
     return Connector.builder().usingClientInfo(info).usingToken(principal, token).build();
   }
+
+  public synchronized TableManager getTableManager() {
+    if (tableManager == null) {
+      tableManager = new TableManager(this);
+    }
+    return tableManager;
+  }
+
+  public synchronized UniqueNameAllocator getUniqueNameAllocator() {
+    if (nameAllocator == null) {
+      nameAllocator = new UniqueNameAllocator(this);
+    }
+    return nameAllocator;
+  }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
index 74a5e03d2b..8d3c24582a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnDefaultTable.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.server.ServerContext;
 
 public class ClientOnDefaultTable extends org.apache.accumulo.core.cli.ClientOnDefaultTable {
@@ -28,8 +27,7 @@ public ServerContext getServerContext() {
     if (instance == null) {
       return ServerContext.getInstance();
     }
-    ClientInfo info = getClientInfo();
-    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
+    return ServerContext.getInstance(getClientInfo());
   }
 
   public ClientOnDefaultTable(String table) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
index c681f01916..d231310baa 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOnRequiredTable.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.server.ServerContext;
 
 public class ClientOnRequiredTable extends org.apache.accumulo.core.cli.ClientOnRequiredTable {
@@ -28,7 +27,6 @@ public ServerContext getServerContext() {
     if (instance == null) {
       return ServerContext.getInstance();
     }
-    ClientInfo info = getClientInfo();
-    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
+    return ServerContext.getInstance(getClientInfo());
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
index a2c0346729..b204226666 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/cli/ClientOpts.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.cli;
 
-import org.apache.accumulo.core.client.ClientInfo;
 import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.server.ServerContext;
 
@@ -33,7 +32,6 @@ public ServerContext getServerContext() {
     if (instance == null) {
       return ServerContext.getInstance();
     }
-    ClientInfo info = getClientInfo();
-    return new ServerContext(instance, info.getZooKeepers(), info.getZooKeepersSessionTimeOut());
+    return ServerContext.getInstance(getClientInfo());
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 49479ad910..854f88303b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -59,7 +59,7 @@
  * An implementation of Instance that looks in HDFS and ZooKeeper to find the master and root tablet
  * location.
  *
- * @deprecated since 2.0.0, Use {@link ServerContext#getInstance()} instead
+ * @deprecated since 2.0.0, Use {@link ServerContext} instead
  */
 @Deprecated
 public class HdfsZooInstance implements org.apache.accumulo.core.client.Instance {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
index 1137ba9c09..a0069bdbf7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ConfigSanityCheck.java
@@ -25,7 +25,8 @@
 public class ConfigSanityCheck implements KeywordExecutable {
 
   public static void main(String[] args) {
-    ServerContext.getInstance().getServerConfFactory().getSystemConfiguration();
+    ServerContext context = ServerContext.getInstance();
+    context.getServerConfFactory().getSystemConfiguration();
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 95b6767b95..253ca1986a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -280,7 +280,6 @@ private static boolean isValidColumn(ColumnUpdate cu) {
             violations = addViolation(violations, 7);
           }
         }
-
       }
     }
 
@@ -297,7 +296,7 @@ private static boolean isValidColumn(ColumnUpdate cu) {
   }
 
   protected Arbitrator getArbitrator() {
-    return new ZooArbitrator();
+    return new ZooArbitrator(ServerContext.getInstance());
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 249c681c87..631ceca9b6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -163,11 +163,12 @@ public TabletFiles(String dir, List<LogEntry> logEntries,
     }
   }
 
-  public static String switchRootTableVolume(String location) throws IOException {
+  public static String switchRootTableVolume(ServerContext context, String location)
+      throws IOException {
     String newLocation = switchVolume(location, FileType.TABLE,
         ServerConstants.getVolumeReplacements());
     if (newLocation != null) {
-      MetadataTableUtil.setRootTabletDir(newLocation);
+      MetadataTableUtil.setRootTabletDir(context, newLocation);
       log.info("Volume replaced: {} -> {}", location, newLocation);
       return new Path(newLocation).toString();
     }
@@ -304,7 +305,7 @@ private static String decommisionedTabletDir(ServerContext context, ZooLock zooL
 
         // only set the new location in zookeeper after a successful copy
         log.info("setting root tablet location to {}", newDir);
-        MetadataTableUtil.setRootTabletDir(newDir.toString());
+        MetadataTableUtil.setRootTabletDir(context, newDir.toString());
 
         // rename the old dir to avoid confusion when someone looks at filesystem... its ok if we
         // fail here and this does not happen because the location in
@@ -315,7 +316,7 @@ private static String decommisionedTabletDir(ServerContext context, ZooLock zooL
 
       } else {
         log.info("setting root tablet location to {}", newDir);
-        MetadataTableUtil.setRootTabletDir(newDir.toString());
+        MetadataTableUtil.setRootTabletDir(context, newDir.toString());
       }
 
       return newDir.toString();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 82007c692f..059ac580f1 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -442,7 +442,7 @@ private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs,
           String key = entry.getKey();
           String value = entry.getValue();
           if (Property.isValidZooPropertyKey(key)) {
-            SystemPropUtil.setSystemProperty(key, value);
+            SystemPropUtil.setSystemProperty(context, key, value);
             log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
           } else {
             log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
@@ -485,7 +485,7 @@ private void initFileSystem(Opts opts, VolumeManager fs, UUID uuid, String rootT
     initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance()), false);
 
     // initialize initial system tables config in zookeeper
-    initSystemTablesConfig();
+    initSystemTablesConfig(Constants.ZROOT + "/" + uuid);
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
     String tableMetadataTabletDir = fs.choose(chooserEnv, ServerConstants.getBaseUris())
@@ -778,7 +778,7 @@ private static void initSecurity(ServerContext context, Opts opts, String iid, S
         rootUser, opts.rootpass);
   }
 
-  public static void initSystemTablesConfig() throws IOException {
+  public static void initSystemTablesConfig(String zooKeeperRoot) throws IOException {
     try {
       Configuration conf = CachedConfiguration.getInstance();
       int max = conf.getInt("dfs.replication.max", 512);
@@ -790,20 +790,24 @@ public static void initSystemTablesConfig() throws IOException {
       if (min > 5)
         setMetadataReplication(min, "min");
       for (Entry<String,String> entry : initialMetadataConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(RootTable.ID, entry.getKey(), entry.getValue()))
+        if (!TablePropUtil.setTableProperty(zooKeeperRoot, RootTable.ID, entry.getKey(),
+            entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
-        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
+        if (!TablePropUtil.setTableProperty(zooKeeperRoot, MetadataTable.ID, entry.getKey(),
+            entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
       }
       // Only add combiner config to accumulo.metadata table (ACCUMULO-3077)
       for (Entry<String,String> entry : initialMetadataCombinerConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(MetadataTable.ID, entry.getKey(), entry.getValue()))
+        if (!TablePropUtil.setTableProperty(zooKeeperRoot, MetadataTable.ID, entry.getKey(),
+            entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
       }
 
       // add configuration to the replication table
       for (Entry<String,String> entry : initialReplicationTableConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(ReplicationTable.ID, entry.getKey(), entry.getValue()))
+        if (!TablePropUtil.setTableProperty(zooKeeperRoot, ReplicationTable.ID, entry.getKey(),
+            entry.getValue()))
           throw new IOException("Cannot create per-table property " + entry.getKey());
       }
     } catch (Exception e) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java b/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
index 80ffacc8be..244e9f9dfa 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
@@ -29,6 +29,7 @@
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -87,6 +88,6 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op
   }
 
   protected Arbitrator getArbitrator() {
-    return new ZooArbitrator();
+    return new ZooArbitrator(ServerContext.getInstance());
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
index 23066091fe..fc7e67d19c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/balancer/TableLoadBalancer.java
@@ -35,7 +35,6 @@
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,7 +61,7 @@ private TabletBalancer constructNewBalancerForTable(String clazzName, Table.ID t
   }
 
   protected String getLoadBalancerClassNameForTable(Table.ID table) {
-    TableState tableState = TableManager.getInstance().getTableState(table);
+    TableState tableState = context.getTableManager().getTableState(table);
     if (tableState == null)
       return null;
     if (tableState.equals(TableState.ONLINE))
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
index a32f03f610..cd92959bd4 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/TabletStateStore.java
@@ -103,7 +103,7 @@ public static void setLocation(ServerContext context, Assignment assignment)
   protected static TabletStateStore getStoreForTablet(KeyExtent extent, ServerContext context)
       throws DistributedStoreException {
     if (extent.isRootTablet()) {
-      return new ZooTabletStateStore();
+      return new ZooTabletStateStore(context);
     } else if (extent.isMeta()) {
       return new RootTabletStateStore(context);
     } else {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
index 908b097b1f..dd671df702 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooStore.java
@@ -21,10 +21,10 @@
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.slf4j.Logger;
@@ -44,8 +44,8 @@ public ZooStore(String basePath) throws IOException {
     this.basePath = basePath;
   }
 
-  public ZooStore() throws IOException {
-    this(ServerContext.getInstance().getZooKeeperRoot());
+  public ZooStore(ClientContext context) throws IOException {
+    this(context.getZooKeeperRoot());
   }
 
   @Override
diff --git a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
index b32216e973..bb86cdcb0f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
@@ -26,6 +26,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.core.util.HostAndPort;
@@ -43,9 +44,9 @@ public ZooTabletStateStore(DistributedStore store) {
     this.store = store;
   }
 
-  public ZooTabletStateStore() throws DistributedStoreException {
+  public ZooTabletStateStore(ClientContext context) throws DistributedStoreException {
     try {
-      store = new ZooStore();
+      store = new ZooStore(context);
     } catch (IOException ex) {
       throw new DistributedStoreException(ex);
     }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
index 0955023d1d..19d886793a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
@@ -149,8 +149,8 @@ void saveToMetadataTable(ServerContext context) throws Exception {
     MetadataTableUtil.getMetadataTable(context).update(m);
   }
 
-  void removeFromZooKeeper() throws Exception {
-    removeFromZooKeeper(ZooReaderWriter.getInstance(), ServerContext.getInstance());
+  void removeFromZooKeeper(ServerContext context) throws Exception {
+    removeFromZooKeeper(ZooReaderWriter.getInstance(), context);
   }
 
   void removeFromZooKeeper(ZooReaderWriter zoorw, ServerContext context)
@@ -159,8 +159,8 @@ void removeFromZooKeeper(ZooReaderWriter zoorw, ServerContext context)
     zoorw.recursiveDelete(zpath, NodeMissingPolicy.SKIP);
   }
 
-  void saveToZooKeeper() throws Exception {
-    saveToZooKeeper(ZooReaderWriter.getInstance(), ServerContext.getInstance());
+  void saveToZooKeeper(ServerContext context) throws Exception {
+    saveToZooKeeper(ZooReaderWriter.getInstance(), context);
   }
 
   void saveToZooKeeper(ZooReaderWriter zoorw, ServerContext context)
@@ -182,8 +182,8 @@ private String getZPath(String zkRoot) throws IOException {
         + Encoding.encodeAsBase64FileName(new Text(baos.toByteArray()));
   }
 
-  static ProblemReport decodeZooKeeperEntry(String node) throws Exception {
-    return decodeZooKeeperEntry(node, ZooReaderWriter.getInstance(), ServerContext.getInstance());
+  static ProblemReport decodeZooKeeperEntry(ServerContext context, String node) throws Exception {
+    return decodeZooKeeperEntry(node, ZooReaderWriter.getInstance(), context);
   }
 
   static ProblemReport decodeZooKeeperEntry(String node, ZooReaderWriter zoorw,
diff --git a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
index 60550751bb..ad95fdac0c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
@@ -97,7 +97,7 @@ public void run() {
         try {
           if (isMeta(pr.getTableId())) {
             // file report in zookeeper
-            pr.saveToZooKeeper();
+            pr.saveToZooKeeper(context);
           } else {
             // file report in metadata table
             pr.saveToMetadataTable(context);
@@ -136,7 +136,7 @@ public void run() {
         try {
           if (isMeta(pr.getTableId())) {
             // file report in zookeeper
-            pr.removeFromZooKeeper();
+            pr.removeFromZooKeeper(context);
           } else {
             // file report in metadata table
             pr.removeFromMetadataTable(context);
@@ -163,7 +163,7 @@ public void deleteProblemReports(Table.ID table) throws Exception {
     if (isMeta(table)) {
       Iterator<ProblemReport> pri = iterator(table);
       while (pri.hasNext()) {
-        pri.next().removeFromZooKeeper();
+        pri.next().removeFromZooKeeper(context);
       }
       return;
     }
@@ -261,7 +261,7 @@ public ProblemReport next() {
           try {
             if (getIter1().hasNext()) {
               iter1Count++;
-              return ProblemReport.decodeZooKeeperEntry(getIter1().next());
+              return ProblemReport.decodeZooKeeperEntry(context, getIter1().next());
             }
 
             if (getIter2().hasNext()) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystem.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystem.java
index 66a4ca2a7d..99b876cbc5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystem.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystem.java
@@ -17,6 +17,7 @@
 package org.apache.accumulo.server.replication;
 
 import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
 import org.apache.hadoop.fs.Path;
 
@@ -46,5 +47,5 @@
    * For example, we only need one implementation for Accumulo, but, for each peer, we have a ZK
    * quorum and instance name
    */
-  void configure(String configuration);
+  void configure(ServerContext context, String configuration);
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemFactory.java b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemFactory.java
index 9335669b31..accaf4efe8 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/replication/ReplicaSystemFactory.java
@@ -20,6 +20,7 @@
 
 import java.util.Map.Entry;
 
+import org.apache.accumulo.server.ServerContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,7 +34,7 @@
    *          {@link ReplicaSystem} implementation class name
    * @return A {@link ReplicaSystem} object from the given name
    */
-  public ReplicaSystem get(String value) {
+  public ReplicaSystem get(ServerContext context, String value) {
     final Entry<String,String> entry = parseReplicaSystemConfiguration(value);
 
     try {
@@ -42,7 +43,7 @@ public ReplicaSystem get(String value) {
       if (ReplicaSystem.class.isAssignableFrom(clz)) {
         Object o = clz.newInstance();
         ReplicaSystem rs = (ReplicaSystem) o;
-        rs.configure(entry.getValue());
+        rs.configure(context, entry.getValue());
         return rs;
       }
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
index 20d1358227..adfc96cec0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tables/TableManager.java
@@ -57,8 +57,7 @@
       .synchronizedMap(new HashMap<>());
   private static final byte[] ZERO_BYTE = {'0'};
 
-  private static TableManager tableManager = null;
-
+  private final ServerContext context;
   private final String zkRoot;
   private final String instanceID;
   private ZooCache zooStateCache;
@@ -99,14 +98,8 @@ public static void prepareNewTableState(String instanceId, Table.ID tableId,
         existsPolicy);
   }
 
-  public synchronized static TableManager getInstance() {
-    if (tableManager == null)
-      tableManager = new TableManager();
-    return tableManager;
-  }
-
-  private TableManager() {
-    ServerContext context = ServerContext.getInstance();
+  public TableManager(ServerContext context) {
+    this.context = context;
     zkRoot = context.getZooKeeperRoot();
     instanceID = context.getInstanceID();
     zooStateCache = new ZooCache(new TableStateWatcher());
@@ -245,7 +238,7 @@ public void cloneTable(Table.ID srcTableId, Table.ID tableId, String tableName,
         NodeExistsPolicy.OVERWRITE);
 
     for (Entry<String,String> entry : propertiesToSet.entrySet())
-      TablePropUtil.setTableProperty(tableId, entry.getKey(), entry.getValue());
+      TablePropUtil.setTableProperty(context, tableId, entry.getKey(), entry.getValue());
 
     for (String prop : propertiesToExclude)
       ZooReaderWriter.getInstance().recursiveDelete(Constants.ZROOT + "/" + instanceID
diff --git a/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java b/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
index 72b4292a32..05380a4411 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/tablets/UniqueNameAllocator.java
@@ -33,14 +33,16 @@
  * This is useful for filenames because it makes caching easy.
  */
 public class UniqueNameAllocator {
+
+  private ServerContext context;
   private long next = 0;
   private long maxAllocated = 0;
   private String nextNamePath;
   private Random rand;
 
-  private UniqueNameAllocator() {
-    nextNamePath = Constants.ZROOT + "/" + ServerContext.getInstance().getInstanceID()
-        + Constants.ZNEXT_FILE;
+  public UniqueNameAllocator(ServerContext context) {
+    this.context = context;
+    nextNamePath = Constants.ZROOT + "/" + context.getInstanceID() + Constants.ZNEXT_FILE;
     rand = new Random();
   }
 
@@ -71,14 +73,4 @@ public synchronized String getNextName() {
     return new String(FastFormat.toZeroPaddedString(next++, 7, Character.MAX_RADIX, new byte[0]),
         UTF_8);
   }
-
-  private static UniqueNameAllocator instance = null;
-
-  public static synchronized UniqueNameAllocator getInstance() {
-    if (instance == null)
-      instance = new UniqueNameAllocator();
-
-    return instance;
-  }
-
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
index 2bc853f9ed..48bc90af8f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -56,6 +56,7 @@
 import org.apache.accumulo.core.util.HostAndPort;
 import org.apache.accumulo.fate.zookeeper.ZooCache;
 import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.start.spi.KeywordExecutable;
@@ -213,7 +214,7 @@ public void execute(final String[] args) {
     }
 
     try {
-      ClientContext context = opts.getServerContext();
+      ServerContext context = opts.getServerContext();
 
       int rc = 0;
 
@@ -245,7 +246,8 @@ public void execute(final String[] args) {
       } else if (cl.getParsedCommand().equals("volumes")) {
         ListVolumesUsed.listVolumes(context);
       } else if (cl.getParsedCommand().equals("randomizeVolumes")) {
-        rc = RandomizeVolumes.randomize(context.getConnector(), randomizeVolumesOpts.tableName);
+        rc = RandomizeVolumes.randomize(context, context.getConnector(),
+            randomizeVolumesOpts.tableName);
       } else {
         everything = cl.getParsedCommand().equals("stopAll");
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
index 3bdc2ce9c8..4cade6ca21 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
@@ -22,7 +22,6 @@
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.Range;
@@ -41,7 +40,6 @@
 import org.apache.accumulo.server.master.state.TabletLocationState;
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.ZooTabletStateStore;
-import org.apache.accumulo.server.tables.TableManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,7 +53,7 @@ public static void main(String[] args) throws Exception {
     findOffline(context, null);
   }
 
-  static int findOffline(ClientContext context, String tableName)
+  static int findOffline(ServerContext context, String tableName)
       throws AccumuloException, TableNotFoundException {
 
     final AtomicBoolean scanning = new AtomicBoolean(false);
@@ -75,7 +73,7 @@ public void update(LiveTServerSet current, Set<TServerInstance> deleted,
 
     Iterator<TabletLocationState> zooScanner;
     try {
-      zooScanner = new ZooTabletStateStore().iterator();
+      zooScanner = new ZooTabletStateStore(context).iterator();
     } catch (DistributedStoreException e) {
       throw new AccumuloException(e);
     }
@@ -83,7 +81,7 @@ public void update(LiveTServerSet current, Set<TServerInstance> deleted,
     int offline = 0;
 
     System.out.println("Scanning zookeeper");
-    if ((offline = checkTablets(zooScanner, tservers)) > 0)
+    if ((offline = checkTablets(context, zooScanner, tservers)) > 0)
       return offline;
 
     if (RootTable.NAME.equals(tableName))
@@ -92,7 +90,7 @@ public void update(LiveTServerSet current, Set<TServerInstance> deleted,
     System.out.println("Scanning " + RootTable.NAME);
     Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(context,
         MetadataSchema.TabletsSection.getRange(), RootTable.NAME);
-    if ((offline = checkTablets(rootScanner, tservers)) > 0)
+    if ((offline = checkTablets(context, rootScanner, tservers)) > 0)
       return offline;
 
     if (MetadataTable.NAME.equals(tableName))
@@ -108,17 +106,18 @@ public void update(LiveTServerSet current, Set<TServerInstance> deleted,
 
     try (MetaDataTableScanner metaScanner = new MetaDataTableScanner(context, range,
         MetadataTable.NAME)) {
-      return checkTablets(metaScanner, tservers);
+      return checkTablets(context, metaScanner, tservers);
     }
   }
 
-  private static int checkTablets(Iterator<TabletLocationState> scanner, LiveTServerSet tservers) {
+  private static int checkTablets(ServerContext context, Iterator<TabletLocationState> scanner,
+      LiveTServerSet tservers) {
     int offline = 0;
 
     while (scanner.hasNext() && !System.out.checkError()) {
       TabletLocationState locationState = scanner.next();
       TabletState state = locationState.getState(tservers.getCurrentServers());
-      if (state != null && state != TabletState.HOSTED && TableManager.getInstance()
+      if (state != null && state != TabletState.HOSTED && context.getTableManager()
           .getTableState(locationState.extent.getTableId()) != TableState.OFFLINE) {
         System.out
             .println(locationState + " is " + state + "  #walogs:" + locationState.walogs.size());
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
index 3dafd0cd82..8af8f35533 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/ListVolumesUsed.java
@@ -21,7 +21,6 @@
 import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.metadata.MetadataTable;
@@ -59,13 +58,13 @@ private static void getLogURIs(TreeSet<String> volumes, LogEntry logEntry) {
     volumes.add(getLogURI(logEntry.filename));
   }
 
-  private static void listZookeeper() throws Exception {
+  private static void listZookeeper(ServerContext context) throws Exception {
     System.out.println("Listing volumes referenced in zookeeper");
     TreeSet<String> volumes = new TreeSet<>();
 
-    volumes.add(getTableURI(MetadataTableUtil.getRootTabletDir()));
+    volumes.add(getTableURI(MetadataTableUtil.getRootTabletDir(context)));
     ArrayList<LogEntry> result = new ArrayList<>();
-    MetadataTableUtil.getRootLogEntries(result);
+    MetadataTableUtil.getRootLogEntries(context, result);
     for (LogEntry logEntry : result) {
       getLogURIs(volumes, logEntry);
     }
@@ -75,7 +74,7 @@ private static void listZookeeper() throws Exception {
 
   }
 
-  private static void listTable(String name, ClientContext context) throws Exception {
+  private static void listTable(String name, ServerContext context) throws Exception {
 
     System.out.println("Listing volumes referenced in " + name + " tablets section");
 
@@ -135,8 +134,8 @@ private static void listTable(String name, ClientContext context) throws Excepti
       System.out.println("\tVolume : " + volume);
   }
 
-  public static void listVolumes(ClientContext context) throws Exception {
-    listZookeeper();
+  public static void listVolumes(ServerContext context) throws Exception {
+    listZookeeper(context);
     System.out.println();
     listTable(RootTable.NAME, context);
     System.out.println();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
index 01be21474f..bc1ca89c2f 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/LoginProperties.java
@@ -46,8 +46,8 @@ public String description() {
 
   @Override
   public void execute(String[] args) throws Exception {
-    AccumuloConfiguration config = ServerContext.getInstance().getServerConfFactory()
-        .getSystemConfiguration();
+    ServerContext context = ServerContext.getInstance();
+    AccumuloConfiguration config = context.getServerConfFactory().getSystemConfiguration();
     Authenticator authenticator = AccumuloVFSClassLoader.getClassLoader()
         .loadClass(config.get(Property.INSTANCE_SECURITY_AUTHENTICATOR))
         .asSubclass(Authenticator.class).newInstance();
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
index e2223f10af..2d4472bef5 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MasterMetadataUtil.java
@@ -33,7 +33,6 @@
 
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.data.Key;
@@ -52,6 +51,7 @@
 import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -67,7 +67,7 @@
 
   private static final Logger log = LoggerFactory.getLogger(MasterMetadataUtil.class);
 
-  public static void addNewTablet(ClientContext context, KeyExtent extent, String path,
+  public static void addNewTablet(ServerContext context, KeyExtent extent, String path,
       TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
       Map<Long,? extends Collection<FileRef>> bulkLoadedFiles, String time, long lastFlushID,
       long lastCompactID, ZooLock zooLock) {
@@ -101,7 +101,7 @@ public static void addNewTablet(ClientContext context, KeyExtent extent, String
     MetadataTableUtil.update(context, zooLock, m, extent);
   }
 
-  public static KeyExtent fixSplit(ClientContext context, Text metadataEntry,
+  public static KeyExtent fixSplit(ServerContext context, Text metadataEntry,
       SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, ZooLock lock)
       throws AccumuloException, IOException {
     log.info("Incomplete split {} attempting to fix", metadataEntry);
@@ -148,7 +148,7 @@ public static KeyExtent fixSplit(ClientContext context, Text metadataEntry,
         time.toString(), initFlushID, initCompactID, lock);
   }
 
-  private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry,
+  private static KeyExtent fixSplit(ServerContext context, Table.ID tableId, Text metadataEntry,
       Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time,
       long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
     if (metadataPrevEndRow == null)
@@ -214,7 +214,7 @@ private static TServerInstance getTServerInstance(String address, ZooLock zooLoc
     }
   }
 
-  public static void replaceDatafiles(ClientContext context, KeyExtent extent,
+  public static void replaceDatafiles(ServerContext context, KeyExtent extent,
       Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
       DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock)
       throws IOException {
@@ -222,7 +222,7 @@ public static void replaceDatafiles(ClientContext context, KeyExtent extent,
         address, lastLocation, zooLock, true);
   }
 
-  public static void replaceDatafiles(ClientContext context, KeyExtent extent,
+  public static void replaceDatafiles(ServerContext context, KeyExtent extent,
       Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
       DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock,
       boolean insertDeleteFlags) throws IOException {
@@ -265,14 +265,14 @@ public static void replaceDatafiles(ClientContext context, KeyExtent extent,
    *          should be relative to the table directory
    *
    */
-  public static void updateTabletDataFile(ClientContext context, KeyExtent extent, FileRef path,
+  public static void updateTabletDataFile(ServerContext context, KeyExtent extent, FileRef path,
       FileRef mergeFile, DataFileValue dfv, String time, Set<FileRef> filesInUseByScans,
       String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation,
       long flushId) {
     if (extent.isRootTablet()) {
       if (unusedWalLogs != null) {
-        updateRootTabletDataFile(extent, path, mergeFile, dfv, time, filesInUseByScans, address,
-            zooLock, unusedWalLogs, lastLocation, flushId);
+        updateRootTabletDataFile(context, extent, path, mergeFile, dfv, time, filesInUseByScans,
+            address, zooLock, unusedWalLogs, lastLocation, flushId);
       }
       return;
     }
@@ -284,11 +284,12 @@ public static void updateTabletDataFile(ClientContext context, KeyExtent extent,
   /**
    * Update the data file for the root tablet
    */
-  private static void updateRootTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile,
-      DataFileValue dfv, String time, Set<FileRef> filesInUseByScans, String address,
-      ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
+  private static void updateRootTabletDataFile(ServerContext context, KeyExtent extent,
+      FileRef path, FileRef mergeFile, DataFileValue dfv, String time,
+      Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs,
+      TServerInstance lastLocation, long flushId) {
     IZooReaderWriter zk = ZooReaderWriter.getInstance();
-    String root = MetadataTableUtil.getZookeeperLogLocation();
+    String root = MetadataTableUtil.getZookeeperLogLocation(context);
     for (String entry : unusedWalLogs) {
       String[] parts = entry.split("/");
       String zpath = root + "/" + parts[parts.length - 1];
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index e47a9e1b1d..4f0361b3a3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -48,7 +48,6 @@
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.BatchWriterImpl;
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Credentials;
 import org.apache.accumulo.core.client.impl.ScannerImpl;
 import org.apache.accumulo.core.client.impl.Table;
@@ -114,7 +113,7 @@
 
   private MetadataTableUtil() {}
 
-  public synchronized static Writer getMetadataTable(ClientContext context) {
+  public synchronized static Writer getMetadataTable(ServerContext context) {
     Credentials credentials = context.getCredentials();
     Writer metadataTable = metadata_tables.get(credentials);
     if (metadataTable == null) {
@@ -124,7 +123,7 @@ public synchronized static Writer getMetadataTable(ClientContext context) {
     return metadataTable;
   }
 
-  public synchronized static Writer getRootTable(ClientContext context) {
+  public synchronized static Writer getRootTable(ServerContext context) {
     Credentials credentials = context.getCredentials();
     Writer rootTable = root_tables.get(credentials);
     if (rootTable == null) {
@@ -134,23 +133,23 @@ public synchronized static Writer getRootTable(ClientContext context) {
     return rootTable;
   }
 
-  public static void putLockID(ZooLock zooLock, Mutation m) {
-    TabletsSection.ServerColumnFamily.LOCK_COLUMN.put(m, new Value(zooLock.getLockID()
-        .serialize(ServerContext.getInstance().getZooKeeperRoot() + "/").getBytes(UTF_8)));
+  public static void putLockID(ServerContext context, ZooLock zooLock, Mutation m) {
+    TabletsSection.ServerColumnFamily.LOCK_COLUMN.put(m,
+        new Value(zooLock.getLockID().serialize(context.getZooKeeperRoot() + "/").getBytes(UTF_8)));
   }
 
-  private static void update(ClientContext context, Mutation m, KeyExtent extent) {
+  private static void update(ServerContext context, Mutation m, KeyExtent extent) {
     update(context, null, m, extent);
   }
 
-  public static void update(ClientContext context, ZooLock zooLock, Mutation m, KeyExtent extent) {
+  public static void update(ServerContext context, ZooLock zooLock, Mutation m, KeyExtent extent) {
     Writer t = extent.isMeta() ? getRootTable(context) : getMetadataTable(context);
-    update(t, zooLock, m);
+    update(context, t, zooLock, m);
   }
 
-  public static void update(Writer t, ZooLock zooLock, Mutation m) {
+  public static void update(ServerContext context, Writer t, ZooLock zooLock, Mutation m) {
     if (zooLock != null)
-      putLockID(zooLock, m);
+      putLockID(context, zooLock, m);
     while (true) {
       try {
         t.update(m);
@@ -166,7 +165,7 @@ public static void update(Writer t, ZooLock zooLock, Mutation m) {
     }
   }
 
-  public static void updateTabletFlushID(KeyExtent extent, long flushID, ClientContext context,
+  public static void updateTabletFlushID(KeyExtent extent, long flushID, ServerContext context,
       ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
@@ -176,7 +175,7 @@ public static void updateTabletFlushID(KeyExtent extent, long flushID, ClientCon
     }
   }
 
-  public static void updateTabletCompactID(KeyExtent extent, long compactID, ClientContext context,
+  public static void updateTabletCompactID(KeyExtent extent, long compactID, ServerContext context,
       ZooLock zooLock) {
     if (!extent.isRootTablet()) {
       Mutation m = new Mutation(extent.getMetadataEntry());
@@ -187,7 +186,7 @@ public static void updateTabletCompactID(KeyExtent extent, long compactID, Clien
   }
 
   public static void updateTabletDataFile(long tid, KeyExtent extent,
-      Map<FileRef,DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock) {
+      Map<FileRef,DataFileValue> estSizes, String time, ServerContext context, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
 
@@ -200,14 +199,14 @@ public static void updateTabletDataFile(long tid, KeyExtent extent,
     update(context, zooLock, m, extent);
   }
 
-  public static void updateTabletDir(KeyExtent extent, String newDir, ClientContext context,
+  public static void updateTabletDir(KeyExtent extent, String newDir, ServerContext context,
       ZooLock lock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
     TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
     update(context, lock, m, extent);
   }
 
-  public static void addTablet(KeyExtent extent, String path, ClientContext context, char timeType,
+  public static void addTablet(KeyExtent extent, String path, ServerContext context, char timeType,
       ZooLock lock) {
     Mutation m = extent.getPrevRowUpdateMutation();
 
@@ -262,7 +261,7 @@ public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRe
     void run(IZooReaderWriter rw) throws KeeperException, InterruptedException, IOException;
   }
 
-  private static void retryZooKeeperUpdate(ClientContext context, ZooLock zooLock,
+  private static void retryZooKeeperUpdate(ServerContext context, ZooLock zooLock,
       ZooOperation op) {
     while (true) {
       try {
@@ -284,7 +283,7 @@ private static void addRootLogEntry(ServerContext context, ZooLock zooLock,
       @Override
       public void run(IZooReaderWriter rw)
           throws KeeperException, InterruptedException, IOException {
-        String root = getZookeeperLogLocation();
+        String root = getZookeeperLogLocation(context);
         rw.putPersistentData(root + "/" + entry.getUniqueID(), entry.toBytes(),
             NodeExistsPolicy.OVERWRITE);
       }
@@ -292,7 +291,7 @@ public void run(IZooReaderWriter rw)
   }
 
   public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent,
-      ClientContext context) throws IOException {
+      ServerContext context) throws IOException {
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<>();
 
     try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
@@ -316,7 +315,7 @@ public void run(IZooReaderWriter rw)
     }
   }
 
-  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, ClientContext context,
+  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, ServerContext context,
       ZooLock zooLock) {
     KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
     Mutation m = ke.getPrevRowUpdateMutation();
@@ -326,7 +325,7 @@ public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, ClientC
   }
 
   public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio,
-      ClientContext context, ZooLock zooLock) {
+      ServerContext context, ZooLock zooLock) {
     Mutation m = extent.getPrevRowUpdateMutation(); //
 
     TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m,
@@ -339,7 +338,7 @@ public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double spli
   }
 
   public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes,
-      List<FileRef> highDatafilesToRemove, final ClientContext context, ZooLock zooLock) {
+      List<FileRef> highDatafilesToRemove, final ServerContext context, ZooLock zooLock) {
     Mutation m = new Mutation(metadataEntry);
     TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
     TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
@@ -357,12 +356,12 @@ public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> da
   }
 
   public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes,
-      List<FileRef> highDatafilesToRemove, ClientContext context, ZooLock zooLock) {
+      List<FileRef> highDatafilesToRemove, ServerContext context, ZooLock zooLock) {
     finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, context, zooLock);
   }
 
   public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete,
-      ClientContext context) throws IOException {
+      ServerContext context) throws IOException {
 
     Table.ID tableId = extent.getTableId();
 
@@ -387,7 +386,7 @@ public static Mutation createDeleteMutation(Table.ID tableId, String pathToRemov
   }
 
   public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles,
-      ClientContext context, ZooLock zooLock) {
+      ServerContext context, ZooLock zooLock) {
     Mutation m = new Mutation(extent.getMetadataEntry());
 
     for (FileRef pathToRemove : scanFiles)
@@ -445,7 +444,7 @@ public static void splitDatafiles(Text midRow, double splitRatio,
     }
   }
 
-  public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context,
+  public static void deleteTable(Table.ID tableId, boolean insertDeletes, ServerContext context,
       ZooLock lock) throws AccumuloException, IOException {
     try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
         BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
@@ -486,14 +485,14 @@ public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientCo
         if (m == null) {
           m = new Mutation(key.getRow());
           if (lock != null)
-            putLockID(lock, m);
+            putLockID(context, lock, m);
         }
 
         if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
           bw.addMutation(m);
           m = new Mutation(key.getRow());
           if (lock != null)
-            putLockID(lock, m);
+            putLockID(context, lock, m);
         }
         m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
       }
@@ -503,13 +502,13 @@ public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientCo
     }
   }
 
-  static String getZookeeperLogLocation() {
-    return ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_WALOGS;
+  static String getZookeeperLogLocation(ServerContext context) {
+    return context.getZooKeeperRoot() + RootTable.ZROOT_TABLET_WALOGS;
   }
 
-  public static void setRootTabletDir(String dir) throws IOException {
+  public static void setRootTabletDir(ServerContext context, String dir) throws IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String zpath = ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
+    String zpath = context.getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
     try {
       zoo.putPersistentData(zpath, dir.getBytes(UTF_8), -1, NodeExistsPolicy.OVERWRITE);
     } catch (KeeperException e) {
@@ -520,9 +519,9 @@ public static void setRootTabletDir(String dir) throws IOException {
     }
   }
 
-  public static String getRootTabletDir() throws IOException {
+  public static String getRootTabletDir(ServerContext context) throws IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String zpath = ServerContext.getInstance().getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
+    String zpath = context.getZooKeeperRoot() + RootTable.ZROOT_TABLET_PATH;
     try {
       return new String(zoo.getData(zpath, null), UTF_8);
     } catch (KeeperException e) {
@@ -534,15 +533,15 @@ public static String getRootTabletDir() throws IOException {
   }
 
   public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(
-      ClientContext context, KeyExtent extent)
+      ServerContext context, KeyExtent extent)
       throws KeeperException, InterruptedException, IOException {
     ArrayList<LogEntry> result = new ArrayList<>();
     TreeMap<FileRef,DataFileValue> sizes = new TreeMap<>();
 
     VolumeManager fs = VolumeManagerImpl.get();
     if (extent.isRootTablet()) {
-      getRootLogEntries(result);
-      Path rootDir = new Path(getRootTabletDir());
+      getRootLogEntries(context, result);
+      Path rootDir = new Path(getRootTabletDir(context));
       FileStatus[] files = fs.listStatus(rootDir);
       for (FileStatus fileStatus : files) {
         if (fileStatus.getPath().toString().endsWith("_tmp")) {
@@ -580,13 +579,13 @@ public static String getRootTabletDir() throws IOException {
     return new Pair<>(result, sizes);
   }
 
-  public static List<LogEntry> getLogEntries(ClientContext context, KeyExtent extent)
+  public static List<LogEntry> getLogEntries(ServerContext context, KeyExtent extent)
       throws IOException, KeeperException, InterruptedException {
     log.info("Scanning logging entries for {}", extent);
     ArrayList<LogEntry> result = new ArrayList<>();
     if (extent.equals(RootTable.EXTENT)) {
       log.info("Getting logs for root tablet from zookeeper");
-      getRootLogEntries(result);
+      getRootLogEntries(context, result);
     } else {
       log.info("Scanning metadata for logs used for tablet {}", extent);
       Scanner scanner = getTabletLogScanner(context, extent);
@@ -605,10 +604,10 @@ public static String getRootTabletDir() throws IOException {
     return result;
   }
 
-  static void getRootLogEntries(final ArrayList<LogEntry> result)
+  static void getRootLogEntries(ServerContext context, final ArrayList<LogEntry> result)
       throws KeeperException, InterruptedException, IOException {
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String root = getZookeeperLogLocation();
+    String root = getZookeeperLogLocation(context);
     // there's a little race between getting the children and fetching
     // the data. The log can be removed in between.
     while (true) {
@@ -627,7 +626,7 @@ static void getRootLogEntries(final ArrayList<LogEntry> result)
     }
   }
 
-  private static Scanner getTabletLogScanner(ClientContext context, KeyExtent extent) {
+  private static Scanner getTabletLogScanner(ServerContext context, KeyExtent extent) {
     Table.ID tableId = MetadataTable.ID;
     if (extent.isMeta())
       tableId = RootTable.ID;
@@ -646,7 +645,7 @@ private static Scanner getTabletLogScanner(ClientContext context, KeyExtent exte
     Iterator<LogEntry> rootTableEntries = null;
     Iterator<Entry<Key,Value>> metadataEntries = null;
 
-    LogEntryIterator(ClientContext context)
+    LogEntryIterator(ServerContext context)
         throws IOException, KeeperException, InterruptedException {
       zookeeperEntries = getLogEntries(context, RootTable.EXTENT).iterator();
       rootTableEntries = getLogEntries(context, new KeyExtent(MetadataTable.ID, null, null))
@@ -686,7 +685,7 @@ public void remove() {
     }
   }
 
-  public static Iterator<LogEntry> getLogEntries(ClientContext context)
+  public static Iterator<LogEntry> getLogEntries(ServerContext context)
       throws IOException, KeeperException, InterruptedException {
     return new LogEntryIterator(context);
   }
@@ -698,7 +697,7 @@ public static void removeUnusedWALEntries(ServerContext context, KeyExtent exten
         @Override
         public void run(IZooReaderWriter rw)
             throws KeeperException, InterruptedException, IOException {
-          String root = getZookeeperLogLocation();
+          String root = getZookeeperLogLocation(context);
           for (LogEntry entry : entries) {
             String path = root + "/" + entry.getUniqueID();
             log.debug("Removing " + path + " from zookeeper");
@@ -879,7 +878,7 @@ public static int checkClone(String testTableName, Table.ID srcTableId, Table.ID
     return rewrites;
   }
 
-  public static void cloneTable(ClientContext context, Table.ID srcTableId, Table.ID tableId,
+  public static void cloneTable(ServerContext context, Table.ID srcTableId, Table.ID tableId,
       VolumeManager volumeManager) throws Exception {
 
     Connector conn = context.getConnector();
@@ -988,7 +987,7 @@ public static void removeBulkLoadEntries(Connector conn, Table.ID tableId, long
     }
   }
 
-  public static Map<Long,? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context,
+  public static Map<Long,? extends Collection<FileRef>> getBulkFilesLoaded(ServerContext context,
       KeyExtent extent) throws IOException {
     Text metadataRow = extent.getMetadataEntry();
     Map<Long,List<FileRef>> result = new HashMap<>();
@@ -1035,7 +1034,7 @@ public static void removeBulkLoadInProgressFlag(ServerContext context, String pa
   /**
    * During an upgrade from 1.6 to 1.7, we need to add the replication table
    */
-  public static void createReplicationTable(ClientContext context) throws IOException {
+  public static void createReplicationTable(ServerContext context) throws IOException {
 
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ReplicationTable.ID);
     String dir = VolumeManagerImpl.get().choose(chooserEnv, ServerConstants.getBaseUris())
@@ -1049,14 +1048,14 @@ public static void createReplicationTable(ClientContext context) throws IOExcept
         new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
     m.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0,
         KeyExtent.encodePrevEndRow(null));
-    update(getMetadataTable(context), null, m);
+    update(context, getMetadataTable(context), null, m);
   }
 
   /**
    * During an upgrade we need to move deletion requests for files under the !METADATA table to the
    * root tablet.
    */
-  public static void moveMetaDeleteMarkers(ClientContext context) {
+  public static void moveMetaDeleteMarkers(ServerContext context) {
     String oldDeletesPrefix = "!!~del";
     Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
 
@@ -1075,7 +1074,7 @@ public static void moveMetaDeleteMarkers(ClientContext context) {
     }
   }
 
-  public static void moveMetaDeleteMarkersFrom14(ClientContext context) {
+  public static void moveMetaDeleteMarkersFrom14(ServerContext context) {
     // new KeyExtent is only added to force update to write to the metadata table, not the root
     // table
     KeyExtent notMetadata = new KeyExtent(Table.ID.of("anythingNotMetadata"), null, null);
@@ -1096,7 +1095,7 @@ public static void moveMetaDeleteMarkersFrom14(ClientContext context) {
     }
   }
 
-  private static void moveDeleteEntry(ClientContext context, KeyExtent oldExtent,
+  private static void moveDeleteEntry(ServerContext context, KeyExtent oldExtent,
       Entry<Key,Value> entry, String rowID, String prefix) {
     String filename = rowID.substring(prefix.length());
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
index 6ef9d9b815..5dec31578a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/NamespacePropUtil.java
@@ -28,14 +28,14 @@
 import org.apache.zookeeper.KeeperException;
 
 public class NamespacePropUtil {
-  public static boolean setNamespaceProperty(Namespace.ID namespaceId, String property,
-      String value) throws KeeperException, InterruptedException {
+  public static boolean setNamespaceProperty(ServerContext context, Namespace.ID namespaceId,
+      String property, String value) throws KeeperException, InterruptedException {
     if (!isPropertyValid(property, value))
       return false;
 
     // create the zk node for per-namespace properties for this namespace if it doesn't already
     // exist
-    String zkNamespacePath = getPath(namespaceId);
+    String zkNamespacePath = getPath(context, namespaceId);
     ZooReaderWriter.getInstance().putPersistentData(zkNamespacePath, new byte[0],
         NodeExistsPolicy.SKIP);
 
@@ -53,14 +53,14 @@ public static boolean isPropertyValid(String property, String value) {
         && Property.isValidTablePropertyKey(property);
   }
 
-  public static void removeNamespaceProperty(Namespace.ID namespaceId, String property)
-      throws InterruptedException, KeeperException {
-    String zPath = getPath(namespaceId) + "/" + property;
+  public static void removeNamespaceProperty(ServerContext context, Namespace.ID namespaceId,
+      String property) throws InterruptedException, KeeperException {
+    String zPath = getPath(context, namespaceId) + "/" + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.SKIP);
   }
 
-  private static String getPath(Namespace.ID namespaceId) {
-    return ServerContext.getInstance().getZooKeeperRoot() + Constants.ZNAMESPACES + "/"
-        + namespaceId + Constants.ZNAMESPACE_CONF;
+  private static String getPath(ServerContext context, Namespace.ID namespaceId) {
+    return context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId
+        + Constants.ZNAMESPACE_CONF;
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index af290b256f..50916eea3d 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -44,7 +44,6 @@
 import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,15 +54,15 @@
   public static void main(String[] args) throws AccumuloException, AccumuloSecurityException {
     ClientOnRequiredTable opts = new ClientOnRequiredTable();
     opts.parseArgs(RandomizeVolumes.class.getName(), args);
+    ServerContext context = opts.getServerContext();
     Connector c;
     if (opts.getToken() == null) {
-      ServerContext context = opts.getServerContext();
       c = context.getConnector();
     } else {
       c = opts.getConnector();
     }
     try {
-      int status = randomize(c, opts.getTableName());
+      int status = randomize(context, c, opts.getTableName());
       System.exit(status);
     } catch (Exception ex) {
       log.error("{}", ex.getMessage(), ex);
@@ -71,7 +70,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     }
   }
 
-  public static int randomize(Connector c, String tableName)
+  public static int randomize(ServerContext context, Connector c, String tableName)
       throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
     final VolumeManager vm = VolumeManagerImpl.get();
     if (vm.getVolumes().size() < 2) {
@@ -84,7 +83,7 @@ public static int randomize(Connector c, String tableName)
       return 2;
     }
     Table.ID tableId = Table.ID.of(tblStr);
-    TableState tableState = TableManager.getInstance().getTableState(tableId);
+    TableState tableState = context.getTableManager().getTableState(tableId);
     if (TableState.OFFLINE != tableState) {
       log.info("Taking {} offline", tableName);
       c.tableOperations().offline(tableName, true);
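
The RandomizeVolumes hunk also shows the pattern that repeats through the rest of this diff: a TableManager.getInstance() lookup becomes an accessor on the context the caller already has. A rough sketch of what a call site looks like after the change (not code from the PR; the class and method names are made up):

    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.core.master.state.tables.TableState;
    import org.apache.accumulo.server.ServerContext;

    class TableStateCheck {
      // The table manager is reached through the context argument instead of a singleton.
      static boolean isOffline(ServerContext context, Table.ID tableId) {
        TableState state = context.getTableManager().getTableState(tableId);
        return state == TableState.OFFLINE;
      }
    }
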
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
index 1fdd4be27a..32c8a225a9 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/SystemPropUtil.java
@@ -30,9 +30,10 @@
 import org.slf4j.LoggerFactory;
 
 public class SystemPropUtil {
+
   private static final Logger log = LoggerFactory.getLogger(SystemPropUtil.class);
 
-  public static boolean setSystemProperty(String property, String value)
+  public static boolean setSystemProperty(ServerContext context, String property, String value)
       throws KeeperException, InterruptedException {
     if (!Property.isValidZooPropertyKey(property)) {
       IllegalArgumentException iae = new IllegalArgumentException(
@@ -60,17 +61,15 @@ public static boolean setSystemProperty(String property, String value)
     }
 
     // create the zk node for this property and set it's data to the specified value
-    String zPath = ServerContext.getInstance().getZooKeeperRoot() + Constants.ZCONFIG + "/"
-        + property;
+    String zPath = context.getZooKeeperRoot() + Constants.ZCONFIG + "/" + property;
 
     return ZooReaderWriter.getInstance().putPersistentData(zPath, value.getBytes(UTF_8),
         NodeExistsPolicy.OVERWRITE);
   }
 
-  public static void removeSystemProperty(String property)
+  public static void removeSystemProperty(ServerContext context, String property)
       throws InterruptedException, KeeperException {
-    String zPath = ServerContext.getInstance().getZooKeeperRoot() + Constants.ZCONFIG + "/"
-        + property;
+    String zPath = context.getZooKeeperRoot() + Constants.ZCONFIG + "/" + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.FAIL);
   }
 }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
index 37ec6b34fc..ab5afb4ccd 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TablePropUtil.java
@@ -28,13 +28,19 @@
 import org.apache.zookeeper.KeeperException;
 
 public class TablePropUtil {
-  public static boolean setTableProperty(Table.ID tableId, String property, String value)
-      throws KeeperException, InterruptedException {
+
+  public static boolean setTableProperty(ServerContext context, Table.ID tableId, String property,
+      String value) throws KeeperException, InterruptedException {
+    return setTableProperty(context.getZooKeeperRoot(), tableId, property, value);
+  }
+
+  public static boolean setTableProperty(String zkRoot, Table.ID tableId, String property,
+      String value) throws KeeperException, InterruptedException {
     if (!isPropertyValid(property, value))
       return false;
 
     // create the zk node for per-table properties for this table if it doesn't already exist
-    String zkTablePath = getTablePath(tableId);
+    String zkTablePath = getTablePath(zkRoot, tableId);
     ZooReaderWriter.getInstance().putPersistentData(zkTablePath, new byte[0],
         NodeExistsPolicy.SKIP);
 
@@ -52,14 +58,13 @@ public static boolean isPropertyValid(String property, String value) {
         && Property.isValidTablePropertyKey(property);
   }
 
-  public static void removeTableProperty(Table.ID tableId, String property)
+  public static void removeTableProperty(ServerContext context, Table.ID tableId, String property)
       throws InterruptedException, KeeperException {
-    String zPath = getTablePath(tableId) + "/" + property;
+    String zPath = getTablePath(context.getZooKeeperRoot(), tableId) + "/" + property;
     ZooReaderWriter.getInstance().recursiveDelete(zPath, NodeMissingPolicy.SKIP);
   }
 
-  private static String getTablePath(Table.ID tableId) {
-    return ServerContext.getInstance().getZooKeeperRoot() + Constants.ZTABLES + "/"
-        + tableId.canonicalID() + Constants.ZTABLE_CONF;
+  private static String getTablePath(String zkRoot, Table.ID tableId) {
+    return zkRoot + Constants.ZTABLES + "/" + tableId.canonicalID() + Constants.ZTABLE_CONF;
   }
 }
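
TablePropUtil keeps a second overload keyed on the ZooKeeper root, presumably so code that has only the root path (Master calls Initialize.initSystemTablesConfig(context.getZooKeeperRoot()) further down in this diff) can still write table properties. A usage sketch, with an example property and a made-up value:

    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.util.TablePropUtil;

    class TablePropExample {
      static void setCompactionRatio(ServerContext context, Table.ID tableId) throws Exception {
        // Normal server code passes the context; the ZooKeeper root is resolved from it.
        TablePropUtil.setTableProperty(context, tableId,
            Property.TABLE_MAJC_RATIO.getKey(), "2");

        // Code holding only the ZooKeeper root string can use the second overload directly.
        TablePropUtil.setTableProperty(context.getZooKeeperRoot(), tableId,
            Property.TABLE_MAJC_RATIO.getKey(), "2");
      }
    }
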
diff --git a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
index b65a40021f..4cbe48a24b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/zookeeper/TransactionWatcher.java
@@ -31,8 +31,13 @@
 public class TransactionWatcher extends org.apache.accumulo.fate.zookeeper.TransactionWatcher {
   public static class ZooArbitrator implements Arbitrator {
 
-    private static ServerContext context = ServerContext.getInstance();
-    ZooReader rdr = new ZooReader(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
+    private ServerContext context;
+    private ZooReader rdr;
+
+    public ZooArbitrator(ServerContext context) {
+      this.context = context;
+      rdr = new ZooReader(context.getZooKeepers(), context.getZooKeepersSessionTimeOut());
+    }
 
     @Override
     public boolean transactionAlive(String type, long tid) throws Exception {
@@ -41,7 +46,8 @@ public boolean transactionAlive(String type, long tid) throws Exception {
       return rdr.exists(path);
     }
 
-    public static void start(String type, long tid) throws KeeperException, InterruptedException {
+    public static void start(ServerContext context, String type, long tid)
+        throws KeeperException, InterruptedException {
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
       writer.putPersistentData(context.getZooKeeperRoot() + "/" + type, new byte[] {},
           NodeExistsPolicy.OVERWRITE);
@@ -51,13 +57,15 @@ public static void start(String type, long tid) throws KeeperException, Interrup
           new byte[] {}, NodeExistsPolicy.OVERWRITE);
     }
 
-    public static void stop(String type, long tid) throws KeeperException, InterruptedException {
+    public static void stop(ServerContext context, String type, long tid)
+        throws KeeperException, InterruptedException {
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
       writer.recursiveDelete(context.getZooKeeperRoot() + "/" + type + "/" + tid,
           NodeMissingPolicy.SKIP);
     }
 
-    public static void cleanup(String type, long tid) throws KeeperException, InterruptedException {
+    public static void cleanup(ServerContext context, String type, long tid)
+        throws KeeperException, InterruptedException {
       IZooReaderWriter writer = ZooReaderWriter.getInstance();
       writer.recursiveDelete(context.getZooKeeperRoot() + "/" + type + "/" + tid,
           NodeMissingPolicy.SKIP);
@@ -65,7 +73,7 @@ public static void cleanup(String type, long tid) throws KeeperException, Interr
           NodeMissingPolicy.SKIP);
     }
 
-    public static Set<Long> allTransactionsAlive(String type)
+    public static Set<Long> allTransactionsAlive(ServerContext context, String type)
         throws KeeperException, InterruptedException {
       final IZooReader reader = ZooReaderWriter.getInstance();
       final Set<Long> result = new HashSet<>();
@@ -89,7 +97,7 @@ public boolean transactionComplete(String type, long tid) throws Exception {
     }
   }
 
-  public TransactionWatcher() {
-    super(new ZooArbitrator());
+  public TransactionWatcher(ServerContext context) {
+    super(new ZooArbitrator(context));
   }
 }
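
With ZooArbitrator's static context field gone, both the watcher and the static helpers take the context explicitly. A sketch of what a caller now looks like (not from the PR; the transaction type string is only an example):

    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.zookeeper.TransactionWatcher;
    import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;

    class ArbitratorExample {
      static void runGuardedWork(ServerContext context, long tid) throws Exception {
        // The watcher is built per context now, not from a hidden singleton.
        TransactionWatcher watcher = new TransactionWatcher(context);

        // The static helpers gained the context as their first argument.
        ZooArbitrator.start(context, "exampleTx", tid);
        try {
          // ... work that must only run while the transaction is alive ...
        } finally {
          ZooArbitrator.stop(context, "exampleTx", tid);
        }
      }
    }
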
diff --git a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index 8b5e7b912c..6e9733e3c8 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -29,9 +29,11 @@
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.easymock.EasyMock;
 import org.junit.Test;
 
 public class MetadataConstraintsTest {
@@ -62,6 +64,8 @@ public void testCheck() {
     Mutation m = new Mutation(new Text("0;foo"));
     TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
 
+    ServerContext context = EasyMock.createMock(ServerContext.class);
+
     MetadataConstraints mc = new MetadataConstraints();
 
     List<Short> violations = mc.check(null, m);
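
Because the context is now injected, the constraint test can stand one up with EasyMock instead of depending on a global instance. The general shape of that pattern is sketched below; which methods get stubbed and the values returned are just examples, not what this test does:

    import org.apache.accumulo.server.ServerContext;
    import org.easymock.EasyMock;

    class MockContextSketch {
      static ServerContext mockContext() {
        ServerContext context = EasyMock.createMock(ServerContext.class);
        // Stub whatever the code under test will ask of the context, then switch to replay mode.
        EasyMock.expect(context.getZooKeeperRoot()).andReturn("/accumulo/test-instance").anyTimes();
        EasyMock.replay(context);
        return context;
      }
    }
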
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 6527283371..aa777e2eb8 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -90,7 +90,6 @@
 import org.apache.accumulo.server.rpc.TCredentialsUpdatingWrapper;
 import org.apache.accumulo.server.rpc.TServerUtils;
 import org.apache.accumulo.server.rpc.ThriftServerType;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.hadoop.fs.FileStatus;
@@ -403,8 +402,8 @@ public void run() {
                 if (parts.length > 2) {
                   Table.ID tableId = Table.ID.of(parts[1]);
                   String tabletDir = parts[2];
-                  TableManager.getInstance().updateTableStateCache(tableId);
-                  TableState tableState = TableManager.getInstance().getTableState(tableId);
+                  context.getTableManager().updateTableStateCache(tableId);
+                  TableState tableState = context.getTableManager().getTableState(tableId);
                   if (tableState != null && tableState != TableState.DELETING) {
                     // clone directories don't always exist
                     if (!tabletDir.startsWith(Constants.CLONE_PREFIX))
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index b22320a742..28b7724242 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -333,7 +333,7 @@ private void upgradeZookeeper() {
             + " initialized prior to the Master transitioning to active. Please"
             + " save all logs and file a bug.");
       }
-      Accumulo.abortIfFateTransactions();
+      Accumulo.abortIfFateTransactions(getContext());
       try {
         log.info("Upgrading zookeeper");
 
@@ -417,7 +417,7 @@ private void upgradeZookeeper() {
         log.debug("Upgrade creating table {} (ID: {})", RootTable.NAME, RootTable.ID);
         TableManager.prepareNewTableState(getInstanceID(), RootTable.ID, Namespace.ID.ACCUMULO,
             RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.SKIP);
-        Initialize.initSystemTablesConfig();
+        Initialize.initSystemTablesConfig(context.getZooKeeperRoot());
         // ensure root user can flush root table
         security.grantTablePermission(context.rpcCreds(), security.getRootUsername(), RootTable.ID,
             TablePermission.ALTER_TABLE, Namespace.ID.ACCUMULO);
@@ -587,7 +587,7 @@ int displayUnassigned() {
       case NORMAL:
         // Count offline tablets for online tables
         for (TabletGroupWatcher watcher : watchers) {
-          TableManager manager = TableManager.getInstance();
+          TableManager manager = context.getTableManager();
           for (Entry<Table.ID,TableCounts> entry : watcher.getStats().entrySet()) {
             Table.ID tableId = entry.getKey();
             TableCounts counts = entry.getValue();
@@ -630,6 +630,10 @@ public ServerContext getContext() {
     return context;
   }
 
+  public TableManager getTableManager() {
+    return context.getTableManager();
+  }
+
   public Connector getConnector() throws AccumuloSecurityException, AccumuloException {
     return context.getConnector();
   }
@@ -848,7 +852,7 @@ TabletGoalState getSystemGoalState(TabletLocationState tls) {
   }
 
   TabletGoalState getTableGoalState(KeyExtent extent) {
-    TableState tableState = TableManager.getInstance().getTableState(extent.getTableId());
+    TableState tableState = context.getTableManager().getTableState(extent.getTableId());
     if (tableState == null)
       return TabletGoalState.DELETED;
     switch (tableState) {
@@ -953,7 +957,7 @@ private void cleanupNonexistentMigrations(final Connector connector)
      * tablet server will load the tablet. check for offline tables and remove their migrations.
      */
     private void cleanupOfflineMigrations() {
-      TableManager manager = TableManager.getInstance();
+      TableManager manager = context.getTableManager();
       for (Table.ID tableId : Tables.getIdToNameMap(context).keySet()) {
         TableState state = manager.getTableState(tableId);
         if (TableState.OFFLINE == state) {
@@ -1277,7 +1281,7 @@ public void run() throws IOException, InterruptedException, KeeperException {
 
     recoveryManager = new RecoveryManager(this);
 
-    TableManager.getInstance().addObserver(this);
+    context.getTableManager().addObserver(this);
 
     StatusThread statusThread = new StatusThread();
     statusThread.start();
@@ -1649,7 +1653,7 @@ public void sessionExpired() {}
         result.add(RootTable.ID);
       return result;
     }
-    TableManager manager = TableManager.getInstance();
+    TableManager manager = context.getTableManager();
 
     for (Table.ID tableId : Tables.getIdToNameMap(context).keySet()) {
       TableState state = manager.getTableState(tableId);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
index b49de608b9..3d070ee337 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/MasterClientServiceHandler.java
@@ -402,7 +402,7 @@ public void removeSystemProperty(TInfo info, TCredentials c, String property)
     master.security.canPerformSystemActions(c);
 
     try {
-      SystemPropUtil.removeSystemProperty(property);
+      SystemPropUtil.removeSystemProperty(master.getContext(), property);
       updatePlugins(property);
     } catch (Exception e) {
       Master.log.error("Problem removing config property in zookeeper", e);
@@ -416,7 +416,7 @@ public void setSystemProperty(TInfo info, TCredentials c, String property, Strin
     master.security.canPerformSystemActions(c);
 
     try {
-      SystemPropUtil.setSystemProperty(property, value);
+      SystemPropUtil.setSystemProperty(master.getContext(), property, value);
       updatePlugins(property);
     } catch (IllegalArgumentException iae) {
       // throw the exception here so it is not caught and converted to a generic TException
@@ -451,9 +451,9 @@ private void alterNamespaceProperty(TCredentials c, String namespace, String pro
 
     try {
       if (value == null) {
-        NamespacePropUtil.removeNamespaceProperty(namespaceId, property);
+        NamespacePropUtil.removeNamespaceProperty(master.getContext(), namespaceId, property);
       } else {
-        NamespacePropUtil.setNamespaceProperty(namespaceId, property, value);
+        NamespacePropUtil.setNamespaceProperty(master.getContext(), namespaceId, property, value);
       }
     } catch (KeeperException.NoNodeException e) {
       // race condition... namespace no longer exists? This call will throw an exception if the
@@ -478,8 +478,8 @@ private void alterTableProperty(TCredentials c, String tableName, String propert
 
     try {
       if (value == null || value.isEmpty()) {
-        TablePropUtil.removeTableProperty(tableId, property);
-      } else if (!TablePropUtil.setTableProperty(tableId, property, value)) {
+        TablePropUtil.removeTableProperty(master.getContext(), tableId, property);
+      } else if (!TablePropUtil.setTableProperty(master.getContext(), tableId, property, value)) {
         throw new Exception("Invalid table property.");
       }
     } catch (KeeperException.NoNodeException e) {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index d96874b949..52447b44b9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -87,7 +87,6 @@
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.master.state.TabletStateStore;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.tablets.TabletTime;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -205,7 +204,7 @@ public void run() {
           }
           Master.log.debug("{} location State: {}", store.name(), tls);
           // ignore entries for tables that do not exist in zookeeper
-          if (TableManager.getInstance().getTableState(tls.extent.getTableId()) == null)
+          if (master.getTableManager().getTableState(tls.extent.getTableId()) == null)
             continue;
 
           if (Master.log.isTraceEnabled())
@@ -415,7 +414,7 @@ public void run() {
 
   private void cancelOfflineTableMigrations(TabletLocationState tls) {
     TServerInstance dest = this.master.migrations.get(tls.extent);
-    TableState tableState = TableManager.getInstance().getTableState(tls.extent.getTableId());
+    TableState tableState = master.getTableManager().getTableState(tls.extent.getTableId());
     if (dest != null && tableState == TableState.OFFLINE) {
       this.master.migrations.remove(tls.extent);
     }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
index 25322474fe..e3f68edc65 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CancelCompactions.java
@@ -41,8 +41,8 @@ public CancelCompactions(Namespace.ID namespaceId, Table.ID tableId) {
 
   @Override
   public long isReady(long tid, Master env) throws Exception {
-    return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.COMPACT_CANCEL)
-        + Utils.reserveTable(tableId, tid, false, true, TableOperation.COMPACT_CANCEL);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.COMPACT_CANCEL)
+        + Utils.reserveTable(env, tableId, tid, false, true, TableOperation.COMPACT_CANCEL);
   }
 
   @Override
@@ -77,7 +77,7 @@ public long isReady(long tid, Master env) throws Exception {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveTable(tableId, tid, false);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, false);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
   }
 }
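
The table operations below all follow CancelCompactions: the Utils lock helpers now take the Master (which carries the context) as their first argument. The skeleton these repos share looks roughly like this; ExampleTableOp is not a real class, and the chosen TableOperation is only illustrative:

    package org.apache.accumulo.master.tableOps;

    import org.apache.accumulo.core.client.impl.Namespace;
    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.core.client.impl.thrift.TableOperation;
    import org.apache.accumulo.fate.Repo;
    import org.apache.accumulo.master.Master;

    class ExampleTableOp extends MasterRepo {
      private static final long serialVersionUID = 1L;
      private final Namespace.ID namespaceId;
      private final Table.ID tableId;

      ExampleTableOp(Namespace.ID namespaceId, Table.ID tableId) {
        this.namespaceId = namespaceId;
        this.tableId = tableId;
      }

      @Override
      public long isReady(long tid, Master env) throws Exception {
        // Reservations go through env now, so Utils can reach the ServerContext.
        return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.COMPACT)
            + Utils.reserveTable(env, tableId, tid, false, true, TableOperation.COMPACT);
      }

      @Override
      public Repo<Master> call(long tid, Master env) throws Exception {
        return null; // a real repo would do its work and return the next step
      }

      @Override
      public void undo(long tid, Master env) throws Exception {
        Utils.unreserveTable(env, tableId, tid, false);
        Utils.unreserveNamespace(env, namespaceId, tid, false);
      }
    }
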
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
index d7b6e8d5dd..ee4801022c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ChangeTableState.java
@@ -22,7 +22,6 @@
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 import org.slf4j.LoggerFactory;
 
 public class ChangeTableState extends MasterRepo {
@@ -45,8 +44,8 @@ public ChangeTableState(Namespace.ID namespaceId, Table.ID tableId, TableOperati
   public long isReady(long tid, Master env) throws Exception {
     // reserve the table so that this op does not run concurrently with create, clone, or delete
     // table
-    return Utils.reserveNamespace(namespaceId, tid, false, true, top)
-        + Utils.reserveTable(tableId, tid, true, true, top);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, true, top)
+        + Utils.reserveTable(env, tableId, tid, true, true, top);
   }
 
   @Override
@@ -55,9 +54,9 @@ public long isReady(long tid, Master env) throws Exception {
     if (top == TableOperation.OFFLINE)
       ts = TableState.OFFLINE;
 
-    TableManager.getInstance().transitionTableState(tableId, ts);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-    Utils.unreserveTable(tableId, tid, true);
+    env.getTableManager().transitionTableState(tableId, ts);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, true);
     LoggerFactory.getLogger(ChangeTableState.class).debug("Changed table state {} {}", tableId, ts);
     env.getEventCoordinator().event("Set table state of %s to %s", tableId, ts);
     return null;
@@ -65,7 +64,7 @@ public long isReady(long tid, Master env) throws Exception {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(namespaceId, tid, false);
-    Utils.unreserveTable(tableId, tid, true);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, true);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
index 81cf7b44da..debda60aba 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CleanUp.java
@@ -47,7 +47,6 @@
 import org.apache.accumulo.server.master.state.TabletState;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -188,7 +187,7 @@ public long isReady(long tid, Master master) throws Exception {
 
     // remove table from zookeeper
     try {
-      TableManager.getInstance().removeTable(tableId);
+      master.getTableManager().removeTable(tableId);
       Tables.clearCache(master.getContext());
     } catch (Exception e) {
       log.error("Failed to find table id in zookeeper", e);
@@ -202,8 +201,8 @@ public long isReady(long tid, Master master) throws Exception {
       log.error("{}", e.getMessage(), e);
     }
 
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(master, tableId, tid, true);
+    Utils.unreserveNamespace(master, namespaceId, tid, false);
 
     LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index cbfb015caf..406b8a258f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -43,9 +43,10 @@ public CloneTable(String user, Namespace.ID namespaceId, Table.ID srcTableId, St
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-    long val = Utils.reserveNamespace(cloneInfo.srcNamespaceId, tid, false, true,
+    long val = Utils.reserveNamespace(environment, cloneInfo.srcNamespaceId, tid, false, true,
+        TableOperation.CLONE);
+    val += Utils.reserveTable(environment, cloneInfo.srcTableId, tid, false, true,
         TableOperation.CLONE);
-    val += Utils.reserveTable(cloneInfo.srcTableId, tid, false, true, TableOperation.CLONE);
     return val;
   }
 
@@ -64,8 +65,8 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
+    Utils.unreserveNamespace(environment, cloneInfo.srcNamespaceId, tid, false);
+    Utils.unreserveTable(environment, cloneInfo.srcTableId, tid, false);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
index 480081a91a..dac2e930dd 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneZookeeper.java
@@ -24,7 +24,6 @@
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 
 class CloneZookeeper extends MasterRepo {
 
@@ -43,8 +42,10 @@ public CloneZookeeper(CloneInfo cloneInfo, ClientContext context)
   public long isReady(long tid, Master environment) throws Exception {
     long val = 0;
     if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      val += Utils.reserveNamespace(cloneInfo.namespaceId, tid, false, true, TableOperation.CLONE);
-    val += Utils.reserveTable(cloneInfo.tableId, tid, true, false, TableOperation.CLONE);
+      val += Utils.reserveNamespace(environment, cloneInfo.namespaceId, tid, false, true,
+          TableOperation.CLONE);
+    val += Utils.reserveTable(environment, cloneInfo.tableId, tid, true, false,
+        TableOperation.CLONE);
     return val;
   }
 
@@ -57,7 +58,7 @@ public long isReady(long tid, Master environment) throws Exception {
       Utils.checkTableDoesNotExist(environment.getContext(), cloneInfo.tableName, cloneInfo.tableId,
           TableOperation.CLONE);
 
-      TableManager.getInstance().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId,
+      environment.getTableManager().cloneTable(cloneInfo.srcTableId, cloneInfo.tableId,
           cloneInfo.tableName, cloneInfo.namespaceId, cloneInfo.propertiesToSet,
           cloneInfo.propertiesToExclude, NodeExistsPolicy.OVERWRITE);
       Tables.clearCache(environment.getContext());
@@ -70,10 +71,10 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    TableManager.getInstance().removeTable(cloneInfo.tableId);
+    environment.getTableManager().removeTable(cloneInfo.tableId);
     if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+      Utils.unreserveNamespace(environment, cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(environment, cloneInfo.tableId, tid, true);
     Tables.clearCache(environment.getContext());
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index ecbfdb8878..88df1b9768 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -84,8 +84,8 @@ public CompactRange(Namespace.ID namespaceId, Table.ID tableId, byte[] startRow,
 
   @Override
   public long isReady(long tid, Master env) throws Exception {
-    return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.COMPACT)
-        + Utils.reserveTable(tableId, tid, false, true, TableOperation.COMPACT);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.COMPACT)
+        + Utils.reserveTable(env, tableId, tid, false, true, TableOperation.COMPACT);
   }
 
   @Override
@@ -176,8 +176,8 @@ public void undo(long tid, Master env) throws Exception {
     try {
       removeIterators(env, tid, tableId);
     } finally {
-      Utils.unreserveNamespace(namespaceId, tid, false);
-      Utils.unreserveTable(tableId, tid, false);
+      Utils.unreserveNamespace(env, namespaceId, tid, false);
+      Utils.unreserveTable(env, tableId, tid, false);
     }
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
index a9663710a2..98984adc8d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactionDriver.java
@@ -185,8 +185,8 @@ public long isReady(long tid, Master master) throws Exception {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     CompactRange.removeIterators(env, tid, tableId);
-    Utils.getReadLock(tableId, tid).unlock();
-    Utils.getReadLock(namespaceId, tid).unlock();
+    Utils.getReadLock(env, tableId, tid).unlock();
+    Utils.getReadLock(env, namespaceId, tid).unlock();
     return null;
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
index e91cbe05f7..4fdec0bdd9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateImportDir.java
@@ -41,7 +41,7 @@
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
 
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    UniqueNameAllocator namer = master.getContext().getUniqueNameAllocator();
 
     Path exportDir = new Path(tableInfo.exportDir);
     String[] tableDirs = ServerConstants.getTablesDirs();
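
UniqueNameAllocator follows the same de-singletoning: it now hangs off the ServerContext (the new field appears in the ServerContext hunk near the top of this diff). A sketch of allocating a name through it; how the name is combined into a file name here is illustrative, not what CreateImportDir does:

    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.tablets.UniqueNameAllocator;

    class ImportNameSketch {
      // Returns the next cluster-wide unique name, typically used to build a new file name.
      static String nextImportFileName(ServerContext context, String extension) {
        UniqueNameAllocator namer = context.getUniqueNameAllocator();
        return namer.getNextName() + "." + extension;
      }
    }
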
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index c3ca32d3b9..65aef994de 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -44,7 +44,8 @@ public CreateTable(String user, String tableName, TimeType timeType, Map<String,
   @Override
   public long isReady(long tid, Master environment) throws Exception {
     // reserve the table's namespace to make sure it doesn't change while the table is created
-    return Utils.reserveNamespace(tableInfo.namespaceId, tid, false, true, TableOperation.CREATE);
+    return Utils.reserveNamespace(environment, tableInfo.namespaceId, tid, false, true,
+        TableOperation.CREATE);
   }
 
   @Override
@@ -67,7 +68,7 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
index 922e41994e..5d66a3af5e 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteNamespace.java
@@ -33,7 +33,7 @@ public DeleteNamespace(Namespace.ID namespaceId) {
 
   @Override
   public long isReady(long id, Master environment) throws Exception {
-    return Utils.reserveNamespace(namespaceId, id, true, true, TableOperation.DELETE);
+    return Utils.reserveNamespace(environment, namespaceId, id, true, true, TableOperation.DELETE);
   }
 
   @Override
@@ -44,7 +44,7 @@ public long isReady(long id, Master environment) throws Exception {
 
   @Override
   public void undo(long id, Master environment) throws Exception {
-    Utils.unreserveNamespace(namespaceId, id, true);
+    Utils.unreserveNamespace(environment, namespaceId, id, true);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
index 7752498428..0922afefb3 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/DeleteTable.java
@@ -22,7 +22,6 @@
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 
 public class DeleteTable extends MasterRepo {
 
@@ -38,20 +37,20 @@ public DeleteTable(Namespace.ID namespaceId, Table.ID tableId) {
 
   @Override
   public long isReady(long tid, Master env) throws Exception {
-    return Utils.reserveNamespace(namespaceId, tid, false, false, TableOperation.DELETE)
-        + Utils.reserveTable(tableId, tid, true, true, TableOperation.DELETE);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, false, TableOperation.DELETE)
+        + Utils.reserveTable(env, tableId, tid, true, true, TableOperation.DELETE);
   }
 
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
-    TableManager.getInstance().transitionTableState(tableId, TableState.DELETING);
+    env.getTableManager().transitionTableState(tableId, TableState.DELETING);
     env.getEventCoordinator().event("deleting table %s ", tableId);
     return new CleanUp(tableId, namespaceId);
   }
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, true);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
index f9c28db4b9..96ae5ca1de 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ExportTable.java
@@ -39,7 +39,7 @@ public ExportTable(Namespace.ID namespaceId, String tableName, Table.ID tableId,
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    return Utils.reserveHdfsDirectory(environment, new Path(tableInfo.exportDir).toString(), tid);
   }
 
   @Override
@@ -49,7 +49,7 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    Utils.unreserveHdfsDirectory(env, new Path(tableInfo.exportDir).toString(), tid);
   }
 
   public static final int VERSION = 1;
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
index adb80396d5..b89771a776 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCancelCompaction.java
@@ -33,8 +33,8 @@ public FinishCancelCompaction(Namespace.ID namespaceId, Table.ID tableId) {
 
   @Override
   public Repo<Master> call(long tid, Master environment) throws Exception {
-    Utils.unreserveTable(tableId, tid, false);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(environment, tableId, tid, false);
+    Utils.unreserveNamespace(environment, namespaceId, tid, false);
     return null;
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
index d089ba1526..fa291826a8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCloneTable.java
@@ -19,7 +19,6 @@
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 import org.slf4j.LoggerFactory;
 
 class FinishCloneTable extends MasterRepo {
@@ -44,13 +43,13 @@ public long isReady(long tid, Master environment) throws Exception {
     // may never create files.. therefore there is no need to consume namenode space w/ directories
     // that are not used... tablet will create directories as needed
 
-    TableManager.getInstance().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
+    environment.getTableManager().transitionTableState(cloneInfo.tableId, TableState.ONLINE);
 
-    Utils.unreserveNamespace(cloneInfo.srcNamespaceId, tid, false);
+    Utils.unreserveNamespace(environment, cloneInfo.srcNamespaceId, tid, false);
     if (!cloneInfo.srcNamespaceId.equals(cloneInfo.namespaceId))
-      Utils.unreserveNamespace(cloneInfo.namespaceId, tid, false);
-    Utils.unreserveTable(cloneInfo.srcTableId, tid, false);
-    Utils.unreserveTable(cloneInfo.tableId, tid, true);
+      Utils.unreserveNamespace(environment, cloneInfo.namespaceId, tid, false);
+    Utils.unreserveTable(environment, cloneInfo.srcTableId, tid, false);
+    Utils.unreserveTable(environment, cloneInfo.tableId, tid, true);
 
     environment.getEventCoordinator().event("Cloned table %s from %s", cloneInfo.tableName,
         cloneInfo.srcTableId);
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
index 8b37f922bd..05c9169526 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateNamespace.java
@@ -38,7 +38,7 @@ public long isReady(long tid, Master environment) throws Exception {
   @Override
   public Repo<Master> call(long id, Master env) throws Exception {
 
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, id, true);
+    Utils.unreserveNamespace(env, namespaceInfo.namespaceId, id, true);
 
     env.getEventCoordinator().event("Created namespace %s ", namespaceInfo.namespaceName);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
index 633decb80b..a80d06ef50 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishCreateTable.java
@@ -19,7 +19,6 @@
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 import org.slf4j.LoggerFactory;
 
 class FinishCreateTable extends MasterRepo {
@@ -39,10 +38,10 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+    env.getTableManager().transitionTableState(tableInfo.tableId, TableState.ONLINE);
 
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(env, tableInfo.tableId, tid, true);
 
     env.getEventCoordinator().event("Created table %s ", tableInfo.tableName);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
index 19d6bf2af8..9932afa446 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/FinishImportTable.java
@@ -19,7 +19,6 @@
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.hadoop.fs.Path;
 import org.slf4j.LoggerFactory;
 
@@ -43,12 +42,12 @@ public long isReady(long tid, Master environment) throws Exception {
 
     env.getFileSystem().deleteRecursively(new Path(tableInfo.importDir, "mappings.txt"));
 
-    TableManager.getInstance().transitionTableState(tableInfo.tableId, TableState.ONLINE);
+    env.getTableManager().transitionTableState(tableInfo.tableId, TableState.ONLINE);
 
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false);
+    Utils.unreserveTable(env, tableInfo.tableId, tid, true);
 
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    Utils.unreserveHdfsDirectory(env, new Path(tableInfo.exportDir).toString(), tid);
 
     env.getEventCoordinator().event("Imported table %s ", tableInfo.tableName);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
index d2c4238976..7aff7865fe 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportPopulateZookeeper.java
@@ -32,7 +32,6 @@
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -49,7 +48,8 @@
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.IMPORT);
+    return Utils.reserveTable(environment, tableInfo.tableId, tid, true, false,
+        TableOperation.IMPORT);
   }
 
   private Map<String,String> getExportedProps(VolumeManager fs) throws Exception {
@@ -78,7 +78,7 @@ public long isReady(long tid, Master environment) throws Exception {
 
       String namespace = Tables.qualify(tableInfo.tableName).getFirst();
       Namespace.ID namespaceId = Namespaces.getNamespaceId(env.getContext(), namespace);
-      TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName,
+      env.getTableManager().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName,
           NodeExistsPolicy.OVERWRITE);
 
       Tables.clearCache(env.getContext());
@@ -87,7 +87,8 @@ public long isReady(long tid, Master environment) throws Exception {
     }
 
     for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet())
-      if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
+      if (!TablePropUtil.setTableProperty(env.getContext(), tableInfo.tableId, entry.getKey(),
+          entry.getValue())) {
         throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(),
             tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
             "Invalid table property " + entry.getKey());
@@ -98,8 +99,8 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    env.getTableManager().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(env, tableInfo.tableId, tid, true);
     Tables.clearCache(env.getContext());
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 479e61b774..8e41037e13 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -54,8 +54,9 @@ public ImportTable(String user, String tableName, String exportDir, Namespace.ID
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid)
-        + Utils.reserveNamespace(tableInfo.namespaceId, tid, false, true, TableOperation.IMPORT);
+    return Utils.reserveHdfsDirectory(environment, new Path(tableInfo.exportDir).toString(), tid)
+        + Utils.reserveNamespace(environment, tableInfo.namespaceId, tid, false, true,
+            TableOperation.IMPORT);
   }
 
   @Override
@@ -119,7 +120,7 @@ public void checkVersions(Master env) throws AcceptableThriftTableOperationExcep
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
-    Utils.unreserveNamespace(tableInfo.namespaceId, tid, false);
+    Utils.unreserveHdfsDirectory(env, new Path(tableInfo.exportDir).toString(), tid);
+    Utils.unreserveNamespace(env, tableInfo.namespaceId, tid, false);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
index 84f54cdc6f..8d86261737 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/MapImportFileNames.java
@@ -61,7 +61,7 @@
 
       FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
 
-      UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+      UniqueNameAllocator namer = environment.getContext().getUniqueNameAllocator();
 
       mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
index c1b0d59835..3138dbfa31 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/NamespaceCleanUp.java
@@ -22,7 +22,6 @@
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.server.tables.TableManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +47,7 @@ public long isReady(long tid, Master master) throws Exception {
 
     // remove from zookeeper
     try {
-      TableManager.getInstance().removeNamespace(namespaceId);
+      master.getTableManager().removeNamespace(namespaceId);
     } catch (Exception e) {
       log.error("Failed to find namespace in zookeeper", e);
     }
@@ -62,7 +61,7 @@ public long isReady(long tid, Master master) throws Exception {
       log.error("{}", e.getMessage(), e);
     }
 
-    Utils.unreserveNamespace(namespaceId, id, true);
+    Utils.unreserveNamespace(master, namespaceId, id, true);
 
     log.debug("Deleted namespace " + namespaceId);
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
index 5b5b9d37fd..68e783d053 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeper.java
@@ -23,7 +23,6 @@
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.master.Master;
-import org.apache.accumulo.server.tables.TableManager;
 import org.apache.accumulo.server.util.TablePropUtil;
 
 class PopulateZookeeper extends MasterRepo {
@@ -38,7 +37,8 @@
 
   @Override
   public long isReady(long tid, Master environment) throws Exception {
-    return Utils.reserveTable(tableInfo.tableId, tid, true, false, TableOperation.CREATE);
+    return Utils.reserveTable(environment, tableInfo.tableId, tid, true, false,
+        TableOperation.CREATE);
   }
 
   @Override
@@ -51,11 +51,12 @@ public long isReady(long tid, Master environment) throws Exception {
       Utils.checkTableDoesNotExist(master.getContext(), tableInfo.tableName, tableInfo.tableId,
           TableOperation.CREATE);
 
-      TableManager.getInstance().addTable(tableInfo.tableId, tableInfo.namespaceId,
+      master.getTableManager().addTable(tableInfo.tableId, tableInfo.namespaceId,
           tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
 
       for (Entry<String,String> entry : tableInfo.props.entrySet())
-        TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue());
+        TablePropUtil.setTableProperty(master.getContext(), tableInfo.tableId, entry.getKey(),
+            entry.getValue());
 
       Tables.clearCache(master.getContext());
       return new ChooseDir(tableInfo);
@@ -67,8 +68,8 @@ public long isReady(long tid, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master master) throws Exception {
-    TableManager.getInstance().removeTable(tableInfo.tableId);
-    Utils.unreserveTable(tableInfo.tableId, tid, true);
+    master.getTableManager().removeTable(tableInfo.tableId);
+    Utils.unreserveTable(master, tableInfo.tableId, tid, true);
     Tables.clearCache(master.getContext());
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
index 50e656ceb4..96e5c4ecc1 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/PopulateZookeeperWithNamespace.java
@@ -38,7 +38,7 @@
 
   @Override
   public long isReady(long id, Master environment) throws Exception {
-    return Utils.reserveNamespace(namespaceInfo.namespaceId, id, true, false,
+    return Utils.reserveNamespace(environment, namespaceInfo.namespaceId, id, true, false,
         TableOperation.CREATE);
   }
 
@@ -54,8 +54,8 @@ public long isReady(long id, Master environment) throws Exception {
           namespaceInfo.namespaceName, NodeExistsPolicy.OVERWRITE);
 
       for (Entry<String,String> entry : namespaceInfo.props.entrySet())
-        NamespacePropUtil.setNamespaceProperty(namespaceInfo.namespaceId, entry.getKey(),
-            entry.getValue());
+        NamespacePropUtil.setNamespaceProperty(master.getContext(), namespaceInfo.namespaceId,
+            entry.getKey(), entry.getValue());
 
       Tables.clearCache(master.getContext());
 
@@ -67,9 +67,9 @@ public long isReady(long id, Master environment) throws Exception {
 
   @Override
   public void undo(long tid, Master master) throws Exception {
-    TableManager.getInstance().removeNamespace(namespaceInfo.namespaceId);
+    master.getTableManager().removeNamespace(namespaceInfo.namespaceId);
     Tables.clearCache(master.getContext());
-    Utils.unreserveNamespace(namespaceInfo.namespaceId, tid, true);
+    Utils.unreserveNamespace(master, namespaceInfo.namespaceId, tid, true);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
index d8e2ff1485..5621fdf81c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameNamespace.java
@@ -38,7 +38,7 @@
 
   @Override
   public long isReady(long id, Master environment) throws Exception {
-    return Utils.reserveNamespace(namespaceId, id, true, true, TableOperation.RENAME);
+    return Utils.reserveNamespace(environment, namespaceId, id, true, true, TableOperation.RENAME);
   }
 
   public RenameNamespace(Namespace.ID namespaceId, String oldName, String newName) {
@@ -76,7 +76,7 @@ public RenameNamespace(Namespace.ID namespaceId, String oldName, String newName)
       Tables.clearCache(master.getContext());
     } finally {
       Utils.tableNameLock.unlock();
-      Utils.unreserveNamespace(namespaceId, id, true);
+      Utils.unreserveNamespace(master, namespaceId, id, true);
     }
 
     LoggerFactory.getLogger(RenameNamespace.class).debug("Renamed namespace {} {} {}", namespaceId,
@@ -87,7 +87,7 @@ public RenameNamespace(Namespace.ID namespaceId, String oldName, String newName)
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(namespaceId, tid, true);
+    Utils.unreserveNamespace(env, namespaceId, tid, true);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
index 74d075c355..3001911554 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/RenameTable.java
@@ -45,8 +45,8 @@
 
   @Override
   public long isReady(long tid, Master env) throws Exception {
-    return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.RENAME)
-        + Utils.reserveTable(tableId, tid, true, true, TableOperation.RENAME);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.RENAME)
+        + Utils.reserveTable(env, tableId, tid, true, true, TableOperation.RENAME);
   }
 
   public RenameTable(Namespace.ID namespaceId, Table.ID tableId, String oldTableName,
@@ -99,8 +99,8 @@ public RenameTable(Namespace.ID namespaceId, Table.ID tableId, String oldTableNa
       Tables.clearCache(master.getContext());
     } finally {
       Utils.tableNameLock.unlock();
-      Utils.unreserveTable(tableId, tid, true);
-      Utils.unreserveNamespace(namespaceId, tid, false);
+      Utils.unreserveTable(master, tableId, tid, true);
+      Utils.unreserveNamespace(master, namespaceId, tid, false);
     }
 
     LoggerFactory.getLogger(RenameTable.class).debug("Renamed table {} {} {}", tableId,
@@ -111,8 +111,8 @@ public RenameTable(Namespace.ID namespaceId, Table.ID tableId, String oldTableNa
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, true);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
index cd00df0a94..0c61fb8c62 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOp.java
@@ -46,8 +46,8 @@
 
   @Override
   public long isReady(long tid, Master env) throws Exception {
-    return Utils.reserveNamespace(namespaceId, tid, false, true, TableOperation.MERGE)
-        + Utils.reserveTable(tableId, tid, true, true, TableOperation.MERGE);
+    return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.MERGE)
+        + Utils.reserveTable(env, tableId, tid, true, true, TableOperation.MERGE);
   }
 
   public TableRangeOp(MergeInfo.Operation op, Namespace.ID namespaceId, Table.ID tableId,
@@ -95,8 +95,8 @@ public void undo(long tid, Master env) throws Exception {
     if (mergeInfo.getState() != MergeState.NONE)
       log.info("removing merge information {}", mergeInfo);
     env.clearMergeState(tableId);
-    Utils.unreserveNamespace(namespaceId, tid, false);
-    Utils.unreserveTable(tableId, tid, true);
+    Utils.unreserveNamespace(env, namespaceId, tid, false);
+    Utils.unreserveTable(env, tableId, tid, true);
   }
 
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
index a6f69b8edc..d24b27b84a 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/TableRangeOpWait.java
@@ -67,8 +67,8 @@ public long isReady(long tid, Master env) throws Exception {
     MergeInfo mergeInfo = master.getMergeInfo(tableId);
     log.info("removing merge information " + mergeInfo);
     master.clearMergeState(tableId);
-    Utils.unreserveTable(tableId, tid, true);
-    Utils.unreserveNamespace(namespaceId, tid, false);
+    Utils.unreserveTable(master, tableId, tid, true);
+    Utils.unreserveNamespace(master, namespaceId, tid, false);
     return null;
   }
 
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
index 0906af721d..75db68dc68 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/Utils.java
@@ -38,7 +38,7 @@
 import org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReservation;
-import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.master.Master;
 import org.apache.accumulo.server.zookeeper.ZooQueueLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.zookeeper.KeeperException;
@@ -49,8 +49,6 @@
   private static final byte[] ZERO_BYTE = {'0'};
   private static final Logger log = LoggerFactory.getLogger(Utils.class);
 
-  private static final ServerContext context = ServerContext.getInstance();
-
   static void checkTableDoesNotExist(ClientContext context, String tableName, Table.ID tableId,
       TableOperation operation) throws AcceptableThriftTableOperationException {
 
@@ -82,12 +80,12 @@ static void checkTableDoesNotExist(ClientContext context, String tableName, Tabl
   static final Lock tableNameLock = new ReentrantLock();
   static final Lock idLock = new ReentrantLock();
 
-  public static long reserveTable(Table.ID tableId, long tid, boolean writeLock,
+  public static long reserveTable(Master env, Table.ID tableId, long tid, boolean writeLock,
       boolean tableMustExist, TableOperation op) throws Exception {
-    if (getLock(tableId, tid, writeLock).tryLock()) {
+    if (getLock(env.getContext(), tableId, tid, writeLock).tryLock()) {
       if (tableMustExist) {
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
-        if (!zk.exists(context.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId))
+        if (!zk.exists(env.getContext().getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId))
           throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op,
               TableOperationExceptionType.NOTFOUND, "Table does not exist");
       }
@@ -98,26 +96,27 @@ public static long reserveTable(Table.ID tableId, long tid, boolean writeLock,
       return 100;
   }
 
-  public static void unreserveTable(Table.ID tableId, long tid, boolean writeLock)
+  public static void unreserveTable(Master env, Table.ID tableId, long tid, boolean writeLock)
       throws Exception {
-    getLock(tableId, tid, writeLock).unlock();
+    getLock(env.getContext(), tableId, tid, writeLock).unlock();
     log.info("table {} ({}) unlocked for ", tableId, Long.toHexString(tid),
         (writeLock ? "write" : "read"));
   }
 
-  public static void unreserveNamespace(Namespace.ID namespaceId, long id, boolean writeLock)
-      throws Exception {
-    getLock(namespaceId, id, writeLock).unlock();
+  public static void unreserveNamespace(Master env, Namespace.ID namespaceId, long id,
+      boolean writeLock) throws Exception {
+    getLock(env.getContext(), namespaceId, id, writeLock).unlock();
     log.info("namespace {} ({}) unlocked for {}", namespaceId, Long.toHexString(id),
         (writeLock ? "write" : "read"));
   }
 
-  public static long reserveNamespace(Namespace.ID namespaceId, long id, boolean writeLock,
-      boolean mustExist, TableOperation op) throws Exception {
-    if (getLock(namespaceId, id, writeLock).tryLock()) {
+  public static long reserveNamespace(Master env, Namespace.ID namespaceId, long id,
+      boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
+    if (getLock(env.getContext(), namespaceId, id, writeLock).tryLock()) {
       if (mustExist) {
         IZooReaderWriter zk = ZooReaderWriter.getInstance();
-        if (!zk.exists(context.getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId))
+        if (!zk.exists(
+            env.getContext().getZooKeeperRoot() + Constants.ZNAMESPACES + "/" + namespaceId))
           throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op,
               TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
       }
@@ -128,9 +127,9 @@ public static long reserveNamespace(Namespace.ID namespaceId, long id, boolean w
       return 100;
   }
 
-  public static long reserveHdfsDirectory(String directory, long tid)
+  public static long reserveHdfsDirectory(Master env, String directory, long tid)
       throws KeeperException, InterruptedException {
-    String resvPath = context.getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
+    String resvPath = env.getContext().getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
         + Base64.getEncoder().encodeToString(directory.getBytes(UTF_8));
 
     IZooReaderWriter zk = ZooReaderWriter.getInstance();
@@ -141,14 +140,15 @@ public static long reserveHdfsDirectory(String directory, long tid)
       return 50;
   }
 
-  public static void unreserveHdfsDirectory(String directory, long tid)
+  public static void unreserveHdfsDirectory(Master env, String directory, long tid)
       throws KeeperException, InterruptedException {
-    String resvPath = context.getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
+    String resvPath = env.getContext().getZooKeeperRoot() + Constants.ZHDFS_RESERVATIONS + "/"
         + Base64.getEncoder().encodeToString(directory.getBytes(UTF_8));
     ZooReservation.release(ZooReaderWriter.getInstance(), resvPath, String.format("%016x", tid));
   }
 
-  private static Lock getLock(AbstractId id, long tid, boolean writeLock) throws Exception {
+  private static Lock getLock(ClientContext context, AbstractId id, long tid, boolean writeLock)
+      throws Exception {
     byte[] lockData = String.format("%016x", tid).getBytes(UTF_8);
     ZooQueueLock qlock = new ZooQueueLock(
         context.getZooKeeperRoot() + Constants.ZTABLE_LOCKS + "/" + id, false);
@@ -163,8 +163,8 @@ private static Lock getLock(AbstractId id, long tid, boolean writeLock) throws E
     return lock;
   }
 
-  public static Lock getReadLock(AbstractId tableId, long tid) throws Exception {
-    return Utils.getLock(tableId, tid, false);
+  public static Lock getReadLock(Master env, AbstractId tableId, long tid) throws Exception {
+    return Utils.getLock(env.getContext(), tableId, tid, false);
   }
 
   static void checkNamespaceDoesNotExist(ClientContext context, String namespace,
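
The Utils changes above are the core of the migration: the class no longer caches a static ServerContext, so every reserve/unreserve helper now takes the Master environment and resolves ZooKeeper paths through env.getContext(). As a rough sketch of the resulting calling pattern in a FATE repo (the class name and fields below are hypothetical and not part of this PR; only the Utils signatures shown in this diff are assumed, and the Namespace/Table/TableOperation import paths are taken to be the same ones the sibling tableOps classes already use):

    package org.apache.accumulo.master.tableOps;

    import org.apache.accumulo.core.client.impl.Namespace;
    import org.apache.accumulo.core.client.impl.Table;
    import org.apache.accumulo.fate.Repo;
    import org.apache.accumulo.master.Master;
    // TableOperation import as in the sibling tableOps classes above (path assumed)

    // Hypothetical repo illustrating the explicit Master argument; not part of this PR.
    class ExampleRangeOp extends MasterRepo {

      private static final long serialVersionUID = 1L;

      private final Namespace.ID namespaceId; // hypothetical fields
      private final Table.ID tableId;

      ExampleRangeOp(Namespace.ID namespaceId, Table.ID tableId) {
        this.namespaceId = namespaceId;
        this.tableId = tableId;
      }

      @Override
      public long isReady(long tid, Master env) throws Exception {
        // Locks are resolved via env.getContext() inside Utils, so the environment
        // must be passed explicitly instead of relying on a static singleton.
        return Utils.reserveNamespace(env, namespaceId, tid, false, true, TableOperation.MERGE)
            + Utils.reserveTable(env, tableId, tid, true, true, TableOperation.MERGE);
      }

      @Override
      public Repo<Master> call(long tid, Master env) throws Exception {
        try {
          // ... do the work ...
          return null;
        } finally {
          // Simplified for illustration: real repos release locks in their
          // terminal step or in undo(), as the classes in this diff do.
          Utils.unreserveTable(env, tableId, tid, true);
          Utils.unreserveNamespace(env, namespaceId, tid, false);
        }
      }
    }
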
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
index d3f84717ef..696f3f1d47 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/WriteExportFiles.java
@@ -84,9 +84,9 @@ private void checkOffline(ClientContext context) throws Exception {
   @Override
   public long isReady(long tid, Master master) throws Exception {
 
-    long reserved = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true,
+    long reserved = Utils.reserveNamespace(master, tableInfo.namespaceID, tid, false, true,
         TableOperation.EXPORT)
-        + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
+        + Utils.reserveTable(master, tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
     if (reserved > 0)
       return reserved;
 
@@ -131,16 +131,16 @@ public long isReady(long tid, Master master) throws Exception {
           tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
           "Failed to create export files " + ioe.getMessage());
     }
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
-    Utils.unreserveHdfsDirectory(new Path(tableInfo.exportDir).toString(), tid);
+    Utils.unreserveNamespace(master, tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(master, tableInfo.tableID, tid, false);
+    Utils.unreserveHdfsDirectory(master, new Path(tableInfo.exportDir).toString(), tid);
     return null;
   }
 
   @Override
   public void undo(long tid, Master env) throws Exception {
-    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
-    Utils.unreserveTable(tableInfo.tableID, tid, false);
+    Utils.unreserveNamespace(env, tableInfo.namespaceID, tid, false);
+    Utils.unreserveTable(env, tableInfo.tableID, tid, false);
   }
 
   public static void exportTable(VolumeManager fs, ServerContext context, String tableName,
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
index e17f3b6875..aa59d07cf8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/BulkImport.java
@@ -41,6 +41,7 @@
 import org.apache.accumulo.master.tableOps.MasterRepo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.util.MetadataTableUtil;
@@ -87,15 +88,15 @@ public BulkImport(Table.ID tableId, String sourceDir, String errorDir, boolean s
 
   @Override
   public long isReady(long tid, Master master) throws Exception {
-    if (!Utils.getReadLock(tableId, tid).tryLock())
+    if (!Utils.getReadLock(master, tableId, tid).tryLock())
       return 100;
 
     Tables.clearCache(master.getContext());
     if (Tables.getTableState(master.getContext(), tableId) == TableState.ONLINE) {
       long reserve1, reserve2;
-      reserve1 = reserve2 = Utils.reserveHdfsDirectory(sourceDir, tid);
+      reserve1 = reserve2 = Utils.reserveHdfsDirectory(master, sourceDir, tid);
       if (reserve1 == 0)
-        reserve2 = Utils.reserveHdfsDirectory(errorDir, tid);
+        reserve2 = Utils.reserveHdfsDirectory(master, errorDir, tid);
       return reserve2;
     } else {
       throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null,
@@ -107,7 +108,7 @@ public long isReady(long tid, Master master) throws Exception {
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.debug(" tid {} sourceDir {}", tid, sourceDir);
 
-    Utils.getReadLock(tableId, tid).lock();
+    Utils.getReadLock(master, tableId, tid).lock();
 
     // check that the error directory exists and is empty
     VolumeManager fs = master.getFileSystem();
@@ -132,7 +133,7 @@ public long isReady(long tid, Master master) throws Exception {
           TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
           errorDir + " is not empty");
 
-    ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
+    ZooArbitrator.start(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     master.updateBulkImportStatus(sourceDir, BulkImportState.MOVING);
     // move the files into the directory
     try {
@@ -147,7 +148,8 @@ public long isReady(long tid, Master master) throws Exception {
     }
   }
 
-  private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOException {
+  private Path createNewBulkDir(ServerContext context, VolumeManager fs, Table.ID tableId)
+      throws IOException {
     Path tempPath = fs.matchingFileSystem(new Path(sourceDir), ServerConstants.getTablesDirs());
     if (tempPath == null)
       throw new IOException(sourceDir + " is not in a volume configured for Accumulo");
@@ -164,7 +166,7 @@ private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOExcep
     // fs.mkdirs()... if only hadoop had a mkdir() function
     // that failed when the dir existed
 
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    UniqueNameAllocator namer = context.getUniqueNameAllocator();
 
     while (true) {
       Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
@@ -180,7 +182,7 @@ private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOExcep
 
   private String prepareBulkImport(Master master, final VolumeManager fs, String dir,
       Table.ID tableId) throws Exception {
-    final Path bulkDir = createNewBulkDir(fs, tableId);
+    final Path bulkDir = createNewBulkDir(master.getContext(), fs, tableId);
 
     MetadataTableUtil.addBulkLoadInProgressFlag(master.getContext(),
         "/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
@@ -188,7 +190,7 @@ private String prepareBulkImport(Master master, final VolumeManager fs, String d
     Path dirPath = new Path(dir);
     FileStatus[] mapFiles = fs.listStatus(dirPath);
 
-    final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    final UniqueNameAllocator namer = master.getContext().getUniqueNameAllocator();
 
     int workerCount = master.getConfiguration().getCount(Property.MASTER_BULK_RENAME_THREADS);
     SimpleThreadPool workers = new SimpleThreadPool(workerCount, "bulk move");
@@ -265,9 +267,9 @@ private String prepareBulkImport(Master master, final VolumeManager fs, String d
   @Override
   public void undo(long tid, Master environment) throws Exception {
     // unreserve source/error directories
-    Utils.unreserveHdfsDirectory(sourceDir, tid);
-    Utils.unreserveHdfsDirectory(errorDir, tid);
-    Utils.getReadLock(tableId, tid).unlock();
-    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+    Utils.unreserveHdfsDirectory(environment, sourceDir, tid);
+    Utils.unreserveHdfsDirectory(environment, errorDir, tid);
+    Utils.getReadLock(environment, tableId, tid).unlock();
+    ZooArbitrator.cleanup(environment.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
   }
 }
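
The UniqueNameAllocator changes in BulkImport follow the same idea: the allocator is obtained from the ServerContext rather than through UniqueNameAllocator.getInstance(). A minimal sketch of that calling pattern, assuming only the getUniqueNameAllocator() and getNextName() calls visible in this diff (the helper class and its arguments are made up for illustration):

    import org.apache.accumulo.core.Constants;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.tablets.UniqueNameAllocator;
    import org.apache.hadoop.fs.Path;

    class BulkDirNames {
      // Hypothetical helper: build the next bulk directory path under 'parent'
      // using the context-scoped allocator instead of a process-wide singleton.
      static Path nextBulkDir(ServerContext context, Path parent) {
        UniqueNameAllocator namer = context.getUniqueNameAllocator();
        return new Path(parent, Constants.BULK_PREFIX + namer.getNextName());
      }
    }
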
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
index 4ef01d45ba..e2b1cb4db9 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CleanUpBulkImport.java
@@ -60,11 +60,11 @@
     Connector conn = master.getConnector();
     MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
     log.debug("releasing HDFS reservations for " + source + " and " + error);
-    Utils.unreserveHdfsDirectory(source, tid);
-    Utils.unreserveHdfsDirectory(error, tid);
-    Utils.getReadLock(tableId, tid).unlock();
+    Utils.unreserveHdfsDirectory(master, source, tid);
+    Utils.unreserveHdfsDirectory(master, error, tid);
+    Utils.getReadLock(master, tableId, tid).unlock();
     log.debug("completing bulkDir import transaction " + tid);
-    ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+    ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     master.removeBulkImportStatus(source);
     return null;
   }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CompleteBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CompleteBulkImport.java
index 5c85f7af03..7eede40291 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CompleteBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer1/CompleteBulkImport.java
@@ -41,7 +41,7 @@ public CompleteBulkImport(Table.ID tableId, String source, String bulk, String e
 
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
+    ZooArbitrator.stop(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     return new CopyFailed(tableId, source, bulk, error);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
index 6b5d26fa45..7dc854478a 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/BulkImportMove.java
@@ -77,7 +77,7 @@ public BulkImportMove(BulkInfo bulkInfo) {
     VolumeManager fs = master.getFileSystem();
 
     if (bulkInfo.tableState == TableState.ONLINE) {
-      ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
+      ZooArbitrator.start(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     }
 
     try {
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
index 072285c080..31db07bcae 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CleanUpBulkImport.java
@@ -55,8 +55,8 @@ public CleanUpBulkImport(BulkInfo info) {
       Connector conn = master.getConnector();
       MetadataTableUtil.removeBulkLoadEntries(conn, info.tableId, tid);
     }
-    Utils.unreserveHdfsDirectory(info.sourceDir, tid);
-    Utils.getReadLock(info.tableId, tid).unlock();
+    Utils.unreserveHdfsDirectory(master, info.sourceDir, tid);
+    Utils.getReadLock(master, info.tableId, tid).unlock();
     // delete json renames and mapping files
     Path renamingFile = new Path(bulkDir, Constants.BULK_RENAME_FILE);
     Path mappingFile = new Path(bulkDir, Constants.BULK_LOAD_MAPPING);
@@ -69,7 +69,7 @@ public CleanUpBulkImport(BulkInfo info) {
 
     log.debug("completing bulkDir import transaction " + tid);
     if (info.tableState == TableState.ONLINE) {
-      ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+      ZooArbitrator.cleanup(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     }
     return null;
   }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CompleteBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CompleteBulkImport.java
index ac7a8a5efd..23faeb56e5 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CompleteBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/CompleteBulkImport.java
@@ -34,7 +34,7 @@ public CompleteBulkImport(BulkInfo info) {
 
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
-    ZooArbitrator.stop(Constants.BULK_ARBITRATOR_TYPE, tid);
+    ZooArbitrator.stop(master.getContext(), Constants.BULK_ARBITRATOR_TYPE, tid);
     return new CleanUpBulkImport(info);
   }
 }
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
index 4c0030d612..21fa939630 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/bulkVer2/PrepBulkImport.java
@@ -43,6 +43,7 @@
 import org.apache.accumulo.master.tableOps.MasterRepo;
 import org.apache.accumulo.master.tableOps.Utils;
 import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.tablets.UniqueNameAllocator;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;
@@ -82,14 +83,14 @@ public PrepBulkImport(Table.ID tableId, String sourceDir, boolean setTime) {
 
   @Override
   public long isReady(long tid, Master master) throws Exception {
-    if (!Utils.getReadLock(bulkInfo.tableId, tid).tryLock())
+    if (!Utils.getReadLock(master, bulkInfo.tableId, tid).tryLock())
       return 100;
 
     if (master.onlineTabletServers().size() == 0)
       return 500;
     Tables.clearCache(master.getContext());
 
-    return Utils.reserveHdfsDirectory(bulkInfo.sourceDir, tid);
+    return Utils.reserveHdfsDirectory(master, bulkInfo.sourceDir, tid);
   }
 
   @VisibleForTesting
@@ -180,11 +181,11 @@ private void checkForMerge(final Master master) throws Exception {
     bulkInfo.tableState = Tables.getTableState(master.getContext(), bulkInfo.tableId);
 
     VolumeManager fs = master.getFileSystem();
-    final UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    final UniqueNameAllocator namer = master.getContext().getUniqueNameAllocator();
     Path sourceDir = new Path(bulkInfo.sourceDir);
     FileStatus[] files = fs.listStatus(sourceDir);
 
-    Path bulkDir = createNewBulkDir(fs, bulkInfo.tableId);
+    Path bulkDir = createNewBulkDir(master.getContext(), fs, bulkInfo.tableId);
     Path mappingFile = new Path(sourceDir, Constants.BULK_LOAD_MAPPING);
 
     Map<String,String> oldToNewNameMap = new HashMap<>();
@@ -220,7 +221,8 @@ private void checkForMerge(final Master master) throws Exception {
     return new BulkImportMove(bulkInfo);
   }
 
-  private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOException {
+  private Path createNewBulkDir(ServerContext context, VolumeManager fs, Table.ID tableId)
+      throws IOException {
     Path tempPath = fs.matchingFileSystem(new Path(bulkInfo.sourceDir),
         ServerConstants.getTablesDirs());
     if (tempPath == null)
@@ -232,7 +234,7 @@ private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOExcep
     Path directory = new Path(tableDir + "/" + tableId);
     fs.mkdirs(directory);
 
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    UniqueNameAllocator namer = context.getUniqueNameAllocator();
     while (true) {
       Path newBulkDir = new Path(directory, Constants.BULK_PREFIX + namer.getNextName());
       if (fs.mkdirs(newBulkDir))
@@ -246,9 +248,9 @@ private Path createNewBulkDir(VolumeManager fs, Table.ID tableId) throws IOExcep
   @Override
   public void undo(long tid, Master environment) throws Exception {
     // unreserve sourceDir/error directories
-    Utils.unreserveHdfsDirectory(bulkInfo.sourceDir, tid);
-    Utils.getReadLock(bulkInfo.tableId, tid).unlock();
-    TransactionWatcher.ZooArbitrator.cleanup(Constants.BULK_ARBITRATOR_TYPE, tid);
+    Utils.unreserveHdfsDirectory(environment, bulkInfo.sourceDir, tid);
+    Utils.getReadLock(environment, bulkInfo.tableId, tid).unlock();
+    TransactionWatcher.ZooArbitrator.cleanup(environment.getContext(),
+        Constants.BULK_ARBITRATOR_TYPE, tid);
   }
-
 }
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
index 5287810e91..a048c1d216 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/Monitor.java
@@ -256,7 +256,7 @@ public static void fetchData() {
           public void run() {
             synchronized (Monitor.class) {
               if (cachedInstanceName.get().equals(DEFAULT_INSTANCE_NAME)) {
-                final String instanceName = ServerContext.getInstance().getInstanceName();
+                final String instanceName = context.getInstanceName();
                 if (null != instanceName) {
                   cachedInstanceName.set(instanceName);
                 }
@@ -434,16 +434,15 @@ public static void main(String[] args) throws Exception {
     final String app = "monitor";
     ServerOpts opts = new ServerOpts();
     opts.parseArgs(app, args);
-    ServerContext context = ServerContext.getInstance();
+    Monitor.context = ServerContext.getInstance();
     context.setupServer(app, Monitor.class.getName(), opts.getAddress());
     try {
       config = context.getServerConfFactory();
-      Monitor.context = context;
       Monitor monitor = new Monitor();
       // Servlets need access to limit requests when the monitor is not active, but Servlets are
       // instantiated via reflection. Expose the service this way instead.
       Monitor.HA_SERVICE_INSTANCE = monitor;
-      monitor.run(context.getHostname());
+      monitor.run();
     } finally {
       context.teardownServer();
     }
@@ -451,13 +450,13 @@ public static void main(String[] args) throws Exception {
 
   private static long START_TIME;
 
-  public void run(String hostname) {
+  public void run() {
     Monitor.START_TIME = System.currentTimeMillis();
     int ports[] = config.getSystemConfiguration().getPort(Property.MONITOR_PORT);
     for (int port : ports) {
       try {
         log.debug("Creating monitor on port {}", port);
-        server = new EmbeddedWebServer(hostname, port);
+        server = new EmbeddedWebServer(context.getHostname(), port);
         server.addServlet(getDefaultServlet(), "/resources/*");
         server.addServlet(getRestServlet(), "/rest/*");
         server.addServlet(getViewServlet(), "/*");
@@ -479,7 +478,7 @@ public void run(String hostname) {
       throw new RuntimeException(e);
     }
 
-    String advertiseHost = hostname;
+    String advertiseHost = context.getHostname();
     if (advertiseHost.equals("0.0.0.0")) {
       try {
         advertiseHost = InetAddress.getLocalHost().getHostName();
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
index 852209e759..80c31c1082 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/replication/ReplicationResource.java
@@ -93,7 +93,7 @@
         String peerName = property.getKey().substring(definedPeersPrefix.length());
         ReplicaSystem replica;
         try {
-          replica = replicaSystemFactory.get(property.getValue());
+          replica = replicaSystemFactory.get(Monitor.getContext(), property.getValue());
         } catch (Exception e) {
           log.warn("Could not instantiate ReplicaSystem for {} with configuration {}",
               property.getKey(), property.getValue(), e);
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/tables/TablesResource.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/tables/TablesResource.java
index b19beae19e..bfda9d52df 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/tables/TablesResource.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/tables/TablesResource.java
@@ -79,7 +79,7 @@ public static TableInformationList getTables() {
         tableStats.put(Table.ID.of(te.getKey()), te.getValue());
 
     Map<String,Double> compactingByTable = TableInfoUtil.summarizeTableStats(Monitor.getMmi());
-    TableManager tableManager = TableManager.getInstance();
+    TableManager tableManager = Monitor.getContext().getTableManager();
 
     // Add tables to the list
     for (Map.Entry<String,Table.ID> entry : Tables.getNameToIdMap(Monitor.getContext())
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
index 0dfcca1ec9..4e20dd1ef8 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/rest/trace/TracesResource.java
@@ -56,7 +56,6 @@
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.monitor.Monitor;
-import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.security.SecurityUtil;
 import org.apache.accumulo.tracer.SpanTree;
 import org.apache.accumulo.tracer.SpanTreeVisitor;
@@ -359,7 +358,7 @@ private void parseSpans(Scanner scanner, Map<String,RecentTracesInformation> sum
   private Scanner getScanner(String table, String principal, AuthenticationToken at)
       throws AccumuloException, AccumuloSecurityException {
     try {
-      Connector conn = ServerContext.getInstance().getConnector(principal, at);
+      Connector conn = Monitor.getContext().getConnector(principal, at);
       if (!conn.tableOperations().exists(table)) {
         return null;
       }
diff --git a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppender.java b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppender.java
index d925f6369e..c886b0f56d 100644
--- a/server/monitor/src/main/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppender.java
+++ b/server/monitor/src/main/java/org/apache/accumulo/monitor/util/AccumuloMonitorAppender.java
@@ -136,14 +136,15 @@ public int hashCode() {
     // path and zooCache are lazily set the first time this tracker is run
     // this allows the tracker to be constructed and scheduled during log4j initialization without
     // triggering any actual logs from the Accumulo or ZooKeeper code
+    private ServerContext context = null;
     private String path = null;
     private ZooCache zooCache = null;
 
     @Override
     public MonitorLocation get() {
       // lazily set up path and zooCache (see comment in constructor)
-      if (this.zooCache == null) {
-        ServerContext context = ServerContext.getInstance();
+      if (this.context == null) {
+        this.context = ServerContext.getInstance();
         this.path = context.getZooKeeperRoot() + Constants.ZMONITOR_LOG4J_ADDR;
         this.zooCache = context.getZooCache();
       }
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
index 7dd90f8c2b..9d9b3a1bae 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServer.java
@@ -286,7 +286,7 @@
   private static final long TIME_BETWEEN_LOCATOR_CACHE_CLEARS = 60 * 60 * 1000;
 
   private final GarbageCollectionLogger gcLogger = new GarbageCollectionLogger();
-  private final TransactionWatcher watcher = new TransactionWatcher();
+  private final TransactionWatcher watcher;
   private final ZooCache masterLockCache = new ZooCache();
 
   private final TabletServerLogger logger;
@@ -357,6 +357,7 @@ public Metrics getMinCMetrics() {
 
   public TabletServer(ServerContext context) {
     this.context = context;
+    this.watcher = new TransactionWatcher(context);
     this.confFactory = context.getServerConfFactory();
     this.fs = context.getVolumeManager();
     final AccumuloConfiguration aconf = getConfiguration();
@@ -2513,9 +2514,10 @@ public void run() {
             getTableConfiguration(extent));
         TabletData data;
         if (extent.isRootTablet()) {
-          data = new TabletData(fs, ZooReaderWriter.getInstance(), getTableConfiguration(extent));
+          data = new TabletData(context, fs, ZooReaderWriter.getInstance(),
+              getTableConfiguration(extent));
         } else {
-          data = new TabletData(extent, fs, tabletsKeyValues.entrySet().iterator());
+          data = new TabletData(context, extent, fs, tabletsKeyValues.entrySet().iterator());
         }
 
         tablet = new Tablet(TabletServer.this, extent, trm, data);
@@ -2983,9 +2985,9 @@ public void run() {
     }
   }
 
-  private static Pair<Text,KeyExtent> verifyRootTablet(KeyExtent extent, TServerInstance instance)
-      throws DistributedStoreException, AccumuloException {
-    ZooTabletStateStore store = new ZooTabletStateStore();
+  private static Pair<Text,KeyExtent> verifyRootTablet(ServerContext context, KeyExtent extent,
+      TServerInstance instance) throws DistributedStoreException, AccumuloException {
+    ZooTabletStateStore store = new ZooTabletStateStore(context);
     if (!store.iterator().hasNext()) {
       throw new AccumuloException("Illegal state: location is not set in zookeeper");
     }
@@ -2999,7 +3001,7 @@ public void run() {
     }
 
     try {
-      return new Pair<>(new Text(MetadataTableUtil.getRootTabletDir()), null);
+      return new Pair<>(new Text(MetadataTableUtil.getRootTabletDir(context)), null);
     } catch (IOException e) {
       throw new AccumuloException(e);
     }
@@ -3013,7 +3015,7 @@ public void run() {
 
     log.debug("verifying extent {}", extent);
     if (extent.isRootTablet()) {
-      return verifyRootTablet(extent, instance);
+      return verifyRootTablet(context, extent, instance);
     }
     Table.ID tableToVerify = MetadataTable.ID;
     if (extent.isMeta())
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
index 7f90340035..1e71e8a813 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/AccumuloReplicaSystem.java
@@ -130,7 +130,7 @@ public static String buildConfiguration(String instanceName, String zookeepers)
   }
 
   @Override
-  public void configure(String configuration) {
+  public void configure(ServerContext context, String configuration) {
     requireNonNull(configuration);
 
     // instance_name,zookeepers
@@ -146,8 +146,7 @@ public void configure(String configuration) {
 
     instanceName = configuration.substring(0, index);
     zookeepers = configuration.substring(index + 1);
-
-    conf = ServerContext.getInstance().getServerConfFactory().getSystemConfiguration();
+    conf = context.getConfiguration();
 
     try {
       fs = VolumeManagerImpl.get(conf);
@@ -160,8 +159,7 @@ public void configure(String configuration) {
   @Override
   public Status replicate(final Path p, final Status status, final ReplicationTarget target,
       final ReplicaSystemHelper helper) {
-    final AccumuloConfiguration localConf = ServerContext.getInstance().getServerConfFactory()
-        .getSystemConfiguration();
+    final AccumuloConfiguration localConf = conf;
 
     log.debug("Replication RPC timeout is {}",
         localConf.get(Property.REPLICATION_RPC_TIMEOUT.getKey()));
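
For ReplicaSystem implementations outside this repository, the visible API change is that configure() now receives the ServerContext alongside the peer configuration string, and ReplicaSystemFactory.get() takes the context as its first argument. A skeletal implementation under those assumptions (the class is hypothetical, and the ReplicaSystemHelper/Status import paths are guesses based on this branch's package layout):

    import org.apache.accumulo.core.replication.ReplicationTarget;
    import org.apache.accumulo.server.ServerContext;
    import org.apache.accumulo.server.replication.ReplicaSystem;
    import org.apache.accumulo.server.replication.ReplicaSystemHelper;
    import org.apache.accumulo.server.replication.proto.Replication.Status;
    import org.apache.hadoop.fs.Path;

    // Hypothetical no-op peer, only to show the new configure(ServerContext, String) signature.
    public class NoopReplicaSystem implements ReplicaSystem {

      @Override
      public void configure(ServerContext context, String configuration) {
        // The context now supplies configuration and ZooKeeper access directly,
        // replacing the previous ServerContext.getInstance() lookup.
      }

      @Override
      public Status replicate(Path p, Status status, ReplicationTarget target,
          ReplicaSystemHelper helper) {
        return status; // report no progress
      }
    }
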
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
index 69bc021aea..35fdbe7610 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationProcessor.java
@@ -25,7 +25,6 @@
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Range;
@@ -34,6 +33,7 @@
 import org.apache.accumulo.core.replication.ReplicationTable;
 import org.apache.accumulo.core.replication.ReplicationTableOfflineException;
 import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.replication.DistributedWorkQueueWorkAssignerHelper;
 import org.apache.accumulo.server.replication.ReplicaSystem;
@@ -55,13 +55,13 @@
 public class ReplicationProcessor implements Processor {
   private static final Logger log = LoggerFactory.getLogger(ReplicationProcessor.class);
 
-  private final ClientContext context;
+  private final ServerContext context;
   private final AccumuloConfiguration conf;
   private final VolumeManager fs;
   private final ReplicaSystemHelper helper;
   private final ReplicaSystemFactory factory;
 
-  public ReplicationProcessor(ClientContext context, AccumuloConfiguration conf, VolumeManager fs) {
+  public ReplicationProcessor(ServerContext context, AccumuloConfiguration conf, VolumeManager fs) {
     this.context = context;
     this.conf = conf;
     this.fs = fs;
@@ -151,7 +151,7 @@ protected ReplicaSystem getReplicaSystem(ReplicationTarget target) {
     String peerType = getPeerType(target.getPeerName());
 
     // Get the peer that we're replicating to
-    return factory.get(peerType);
+    return factory.get(context, peerType);
   }
 
   protected String getPeerType(String peerName) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
index 2ccfe76588..123401ab6b 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/replication/ReplicationWorker.java
@@ -18,11 +18,11 @@
 
 import java.util.concurrent.ThreadPoolExecutor;
 
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.replication.ReplicationConstants;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.zookeeper.KeeperException;
@@ -35,15 +35,15 @@
 public class ReplicationWorker implements Runnable {
   private static final Logger log = LoggerFactory.getLogger(ReplicationWorker.class);
 
-  private ClientContext context;
+  private ServerContext context;
   private AccumuloConfiguration conf;
   private VolumeManager fs;
   private ThreadPoolExecutor executor;
 
-  public ReplicationWorker(ClientContext clientCtx, VolumeManager fs) {
-    this.context = clientCtx;
+  public ReplicationWorker(ServerContext context, VolumeManager fs) {
+    this.context = context;
     this.fs = fs;
-    this.conf = clientCtx.getConfiguration();
+    this.conf = context.getConfiguration();
   }
 
   public void setExecutor(ThreadPoolExecutor executor) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
index 9f8de0c8da..68444cdd66 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/BulkImportCacheCleaner.java
@@ -44,8 +44,8 @@ public void run() {
     }
     try {
       // get the current transactions from ZooKeeper
-      final Set<Long> allTransactionsAlive = ZooArbitrator
-          .allTransactionsAlive(Constants.BULK_ARBITRATOR_TYPE);
+      final Set<Long> allTransactionsAlive = ZooArbitrator.allTransactionsAlive(server.getContext(),
+          Constants.BULK_ARBITRATOR_TYPE);
       // remove any that are still alive
       tids.removeAll(allTransactionsAlive);
       // cleanup any memory of these transactions
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 5d1f4fd410..98ce1f6947 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -274,8 +274,8 @@ public int getLogId() {
   FileRef getNextMapFilename(String prefix) throws IOException {
     String extension = FileOperations.getNewFileExtension(tableConfiguration);
     checkTabletDir();
-    return new FileRef(location + "/" + prefix + UniqueNameAllocator.getInstance().getNextName()
-        + "." + extension);
+    return new FileRef(
+        location + "/" + prefix + context.getUniqueNameAllocator().getNextName() + "." + extension);
   }
 
   private void checkTabletDir() throws IOException {
@@ -2298,7 +2298,7 @@ public boolean isMajorCompactionQueued() {
       KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
       KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
 
-      String lowDirectory = createTabletDirectory(getTabletServer().getFileSystem(),
+      String lowDirectory = createTabletDirectory(context, getTabletServer().getFileSystem(),
           extent.getTableId(), midRow);
 
       // write new tablet information to MetadataTable
@@ -2842,10 +2842,11 @@ public AtomicLong getScannedCounter() {
     return scannedCount;
   }
 
-  private static String createTabletDirectory(VolumeManager fs, Table.ID tableId, Text endRow) {
+  private static String createTabletDirectory(ServerContext context, VolumeManager fs,
+      Table.ID tableId, Text endRow) {
     String lowDirectory;
 
-    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
+    UniqueNameAllocator namer = context.getUniqueNameAllocator();
     VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
     String volume = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
         + Path.SEPARATOR;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
index 7774875a6d..f7a01f83d6 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/TabletData.java
@@ -49,6 +49,7 @@
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 import org.apache.accumulo.core.tabletserver.log.LogEntry;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeUtil;
@@ -68,6 +69,7 @@
 public class TabletData {
   private static Logger log = LoggerFactory.getLogger(TabletData.class);
 
+  private ServerContext context;
   private String time = null;
   private SortedMap<FileRef,DataFileValue> dataFiles = new TreeMap<>();
   private List<LogEntry> logEntries = new ArrayList<>();
@@ -80,7 +82,9 @@
   private String directory = null;
 
   // Read tablet data from metadata tables
-  public TabletData(KeyExtent extent, VolumeManager fs, Iterator<Entry<Key,Value>> entries) {
+  public TabletData(ServerContext context, KeyExtent extent, VolumeManager fs,
+      Iterator<Entry<Key,Value>> entries) {
+    this.context = context;
     final Text family = new Text();
     Text rowName = extent.getMetadataEntry();
     while (entries.hasNext()) {
@@ -132,9 +136,11 @@ public TabletData(KeyExtent extent, VolumeManager fs, Iterator<Entry<Key,Value>>
   }
 
   // Read basic root table metadata from zookeeper
-  public TabletData(VolumeManager fs, ZooReader rdr, AccumuloConfiguration conf)
-      throws IOException {
-    directory = VolumeUtil.switchRootTableVolume(MetadataTableUtil.getRootTabletDir());
+  public TabletData(ServerContext context, VolumeManager fs, ZooReader rdr,
+      AccumuloConfiguration conf) throws IOException {
+    this.context = context;
+    directory = VolumeUtil.switchRootTableVolume(context,
+        MetadataTableUtil.getRootTabletDir(context));
 
     Path location = new Path(directory);
 
@@ -166,7 +172,7 @@ public TabletData(VolumeManager fs, ZooReader rdr, AccumuloConfiguration conf)
     }
 
     try {
-      logEntries = MetadataTableUtil.getLogEntries(null, RootTable.EXTENT);
+      logEntries = MetadataTableUtil.getLogEntries(context, RootTable.EXTENT);
     } catch (Exception ex) {
       throw new RuntimeException("Unable to read tablet log entries", ex);
     }
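
TabletData applies the same constructor injection but also keeps the context in a field, since later calls (switchRootTableVolume, getLogEntries) now require it. A compact sketch of storing an injected context for reuse, with hypothetical names:

    public class StoredContextSketch {

        // Stand-in for ServerContext.
        static class AppContext {
            String zooKeeperRoot() { return "/accumulo/instance-id"; }
        }

        // Stand-in for a metadata utility that now requires the context.
        static class MetadataUtil {
            static String rootTabletDir(AppContext context) {
                return context.zooKeeperRoot() + "/root_tablet";
            }
        }

        static class TabletDataSketch {
            private final AppContext context; // kept for later lookups
            private final String directory;

            TabletDataSketch(AppContext context) {
                this.context = context;
                this.directory = MetadataUtil.rootTabletDir(context);
            }

            String directory() { return directory; }
        }

        public static void main(String[] args) {
            System.out.println(new TabletDataSketch(new AppContext()).directory());
        }
    }
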
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
index e1b38e227e..e1adbd9d1c 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/replication/ReplicationProcessorTest.java
@@ -21,11 +21,11 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.replication.DistributedWorkQueueWorkAssignerHelper;
 import org.apache.accumulo.server.replication.ReplicaSystem;
@@ -41,7 +41,7 @@
   @Test
   public void peerTypeExtractionFromConfiguration() {
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
-    ClientContext context = EasyMock.createMock(ClientContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
 
     Map<String,String> data = new HashMap<>();
 
@@ -58,7 +58,7 @@ public void peerTypeExtractionFromConfiguration() {
   @Test(expected = IllegalArgumentException.class)
   public void noPeerConfigurationThrowsAnException() {
     VolumeManager fs = EasyMock.createMock(VolumeManager.class);
-    ClientContext context = EasyMock.createMock(ClientContext.class);
+    ServerContext context = EasyMock.createMock(ServerContext.class);
 
     Map<String,String> data = new HashMap<>();
     ConfigurationCopy conf = new ConfigurationCopy(data);
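
On the test side, passing the context in explicitly is what lets ReplicationProcessorTest swap its ClientContext mocks for ServerContext mocks. A self-contained sketch of that testing style against a hypothetical Context interface; the EasyMock calls used (createMock, expect, andReturn, replay, verify) are the library's standard API, and JUnit 4 is assumed:

    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;
    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class ContextMockSketchTest {

        // Hypothetical interface playing the role of ServerContext in the test.
        interface Context {
            String getConfigValue(String key);
        }

        // Hypothetical class under test: reads its peer type from the injected context.
        static class PeerTypeReader {
            private final Context context;
            PeerTypeReader(Context context) { this.context = context; }
            String peerType(String peerName) {
                return context.getConfigValue("replication.peer." + peerName);
            }
        }

        @Test
        public void readsPeerTypeFromInjectedContext() {
            Context context = createMock(Context.class);
            expect(context.getConfigValue("replication.peer.cluster2")).andReturn("mock-type");
            replay(context);

            assertEquals("mock-type", new PeerTypeReader(context).peerType("cluster2"));
            verify(context);
        }
    }
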
diff --git a/shell/src/main/java/org/apache/accumulo/shell/commands/ListBulkCommand.java b/shell/src/main/java/org/apache/accumulo/shell/commands/ListBulkCommand.java
index 3a1f16c015..e62b078815 100644
--- a/shell/src/main/java/org/apache/accumulo/shell/commands/ListBulkCommand.java
+++ b/shell/src/main/java/org/apache/accumulo/shell/commands/ListBulkCommand.java
@@ -52,7 +52,7 @@ public int execute(final String fullCommand, final CommandLine cl, final Shell s
 
     MasterMonitorInfo stats;
     MasterClientService.Iface client = null;
-    ServerContext context = new ServerContext(shellState.getContext());
+    ServerContext context = ServerContext.getInstance(shellState.getContext().getClientInfo());
     while (true) {
       try {
         client = MasterClient.getConnectionWithRetry(context);
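
In ListBulkCommand the shell no longer constructs a ServerContext directly; it asks for the process-wide instance, seeding it with the client info it already holds. A minimal sketch of a lazily created, synchronized singleton accessor of that kind, with hypothetical AppContext/ClientInfo types rather than the actual implementation:

    public class SingletonContextSketch {

        // Hypothetical connection settings carried over from the client side.
        static class ClientInfo {
            final String instanceName;
            ClientInfo(String instanceName) { this.instanceName = instanceName; }
        }

        static class AppContext {
            private static AppContext instance = null;

            private final String instanceName;

            private AppContext(String instanceName) {
                this.instanceName = instanceName;
            }

            // First caller initializes the instance; later calls return the same object.
            public static synchronized AppContext getInstance(ClientInfo info) {
                if (instance == null) {
                    instance = new AppContext(info.instanceName);
                }
                return instance;
            }

            public static synchronized AppContext getInstance() {
                if (instance == null) {
                    instance = new AppContext("default");
                }
                return instance;
            }

            String instanceName() { return instanceName; }
        }

        public static void main(String[] args) {
            AppContext a = AppContext.getInstance(new ClientInfo("test"));
            AppContext b = AppContext.getInstance();
            System.out.println(a == b);           // true: same instance
            System.out.println(a.instanceName()); // test
        }
    }

A consequence of this shape, at least in the sketch, is that whichever caller initializes the instance first fixes its settings; arguments passed to later getInstance calls are ignored.
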
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
index 67e6ea96f3..d44b372a08 100644
--- a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -41,6 +41,7 @@
 import org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration;
 import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.test.categories.StandaloneCapableClusterTests;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -276,6 +277,10 @@ public static ClientContext getClientContext() {
     return new ClientContext(getClientInfo());
   }
 
+  public static ServerContext getServerContext() {
+    return getCluster().getServerContext();
+  }
+
   public static boolean saslEnabled() {
     if (initialized) {
       return getClientInfo().saslEnabled();
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java b/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
index 0dc53ef2c1..74ad75c0b9 100644
--- a/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
@@ -17,7 +17,6 @@
 
 package org.apache.accumulo.test;
 
-import org.apache.accumulo.core.client.impl.ClientContext;
 import org.apache.accumulo.core.client.impl.Table;
 import org.apache.accumulo.core.client.impl.Writer;
 import org.apache.accumulo.core.data.Mutation;
@@ -26,6 +25,7 @@
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.junit.Test;
 
@@ -43,7 +43,7 @@ public void test() throws Exception {
     getConnector().securityOperations().grantTablePermission(getAdminPrincipal(),
         MetadataTable.NAME, TablePermission.WRITE);
 
-    ClientContext context = getClientContext();
+    ServerContext context = getServerContext();
     Writer w = new Writer(context, MetadataTable.ID);
     KeyExtent extent = new KeyExtent(Table.ID.of("5"), null, null);
 
@@ -52,7 +52,7 @@ public void test() throws Exception {
     m.put("badcolfam", "badcolqual", "3");
 
     try {
-      MetadataTableUtil.update(w, null, m);
+      MetadataTableUtil.update(context, w, null, m);
     } catch (RuntimeException e) {
       if (e.getCause().getClass().equals(ConstraintViolationException.class)) {
         throw (ConstraintViolationException) e.getCause();
diff --git a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
index 16a9d9cb6f..d21eb7346b 100644
--- a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
+++ b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
@@ -30,7 +30,6 @@
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -38,7 +37,6 @@
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.io.Text;
 import org.slf4j.Logger;
@@ -49,15 +47,15 @@
 public class QueryMetadataTable {
   private static final Logger log = LoggerFactory.getLogger(QueryMetadataTable.class);
 
-  private static String principal;
-  private static AuthenticationToken token;
-
   static String location;
 
   static class MDTQuery implements Runnable {
+
+    private Connector conn;
     private Text row;
 
-    MDTQuery(Text row) {
+    MDTQuery(Connector conn, Text row) {
+      this.conn = conn;
       this.row = row;
     }
 
@@ -67,8 +65,7 @@ public void run() {
       try {
         KeyExtent extent = new KeyExtent(row, (Text) null);
 
-        Connector connector = ServerContext.getInstance().getConnector(principal, token);
-        mdScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        mdScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         Text row = extent.getMetadataEntry();
 
         mdScanner.setRange(new Range(row));
@@ -81,12 +78,6 @@ public void run() {
       } catch (TableNotFoundException e) {
         log.error("Table '" + MetadataTable.NAME + "' not found.", e);
         throw new RuntimeException(e);
-      } catch (AccumuloException e) {
-        log.error("AccumuloException encountered.", e);
-        throw new RuntimeException(e);
-      } catch (AccumuloSecurityException e) {
-        log.error("AccumuloSecurityException encountered.", e);
-        throw new RuntimeException(e);
       } finally {
         if (mdScanner != null) {
           mdScanner.close();
@@ -146,7 +137,7 @@ public static void main(String[] args)
 
     for (int i = 0; i < opts.numQueries; i++) {
       int index = r.nextInt(rows.size());
-      MDTQuery mdtq = new MDTQuery(rows.get(index));
+      MDTQuery mdtq = new MDTQuery(connector, rows.get(index));
       tp.submit(mdtq);
     }
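
QueryMetadataTable shows the same idea inside worker threads: main builds one Connector and hands it to every MDTQuery it submits, instead of each Runnable fetching a connection through the singleton. A stripped-down sketch of that handoff, using a hypothetical Connection type in place of Connector:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class InjectedConnectionSketch {

        // Hypothetical stand-in for a shared, thread-safe client handle.
        static class Connection {
            String scan(String row) { return "data-for-" + row; }
        }

        // Each task receives the shared connection instead of looking one up.
        static class RowQuery implements Runnable {
            private final Connection conn;
            private final String row;

            RowQuery(Connection conn, String row) {
                this.conn = conn;
                this.row = row;
            }

            @Override
            public void run() {
                System.out.println(conn.scan(row));
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Connection conn = new Connection();
            ExecutorService pool = Executors.newFixedThreadPool(4);
            for (int i = 0; i < 8; i++) {
                pool.submit(new RowQuery(conn, "row" + i));
            }
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
        }
    }
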
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
index 225f3d84a2..1898e983e6 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -77,8 +77,7 @@ private KeyExtent nke(String table, String endRow, String prevEndRow) {
         prevEndRow == null ? null : new Text(prevEndRow));
   }
 
-  private void run() throws Exception {
-    ServerContext c = ServerContext.getInstance();
+  private void run(ServerContext c) throws Exception {
     String zPath = c.getZooKeeperRoot() + "/testLock";
     IZooReaderWriter zoo = ZooReaderWriter.getInstance();
     zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
@@ -150,7 +149,7 @@ private void runSplitRecoveryTest(ServerContext context, int failPoint, String m
         splitMapFiles = mapFiles;
       }
       int tid = 0;
-      TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
+      TransactionWatcher.ZooArbitrator.start(context, Constants.BULK_ARBITRATOR_TYPE, tid);
       MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
     }
 
@@ -286,7 +285,7 @@ private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes,
   }
 
   public static void main(String[] args) throws Exception {
-    new SplitRecoveryIT().run();
+    new SplitRecoveryIT().run(ServerContext.getInstance());
   }
 
   @Test
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java b/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
index c629321c98..63eea7fe77 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ZombieTServer.java
@@ -100,7 +100,7 @@ public static void main(String[] args) throws Exception {
     Random random = new Random(System.currentTimeMillis() % 1000);
     int port = random.nextInt(30000) + 2000;
     ServerContext context = ServerContext.getInstance();
-    TransactionWatcher watcher = new TransactionWatcher();
+    TransactionWatcher watcher = new TransactionWatcher(context);
     final ThriftClientHandler tch = new ThriftClientHandler(context, watcher);
     Processor<Iface> processor = new Processor<>(tch);
     ServerAddress serverPort = TServerUtils.startTServer(context.getConfiguration(),
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/NullTserver.java b/test/src/main/java/org/apache/accumulo/test/performance/NullTserver.java
index d8ba8696d0..7bd4c37437 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/NullTserver.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/NullTserver.java
@@ -309,8 +309,8 @@ public static void main(String[] args) throws Exception {
     // modify metadata
     int zkTimeOut = (int) DefaultConfiguration.getInstance()
         .getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
-    ServerContext context = new ServerContext(opts.iname, opts.keepers, zkTimeOut);
-    TransactionWatcher watcher = new TransactionWatcher();
+    ServerContext context = ServerContext.getInstance(opts.iname, opts.keepers, zkTimeOut);
+    TransactionWatcher watcher = new TransactionWatcher(context);
     ThriftClientHandler tch = new ThriftClientHandler(context, watcher);
     Processor<Iface> processor = new Processor<>(tch);
     TServerUtils.startTServer(context.getConfiguration(), ThriftServerType.CUSTOM_HS_HA, processor,
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
index dd8a6d0724..ea09a1512b 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
@@ -106,8 +106,8 @@ public static void main(String[] args) throws Exception {
       columnsTmp = opts.columns.split(",");
     final String columns[] = columnsTmp;
 
-    final VolumeManager fs = VolumeManagerImpl.get();
     ServerContext context = opts.getServerContext();
+    final VolumeManager fs = context.getVolumeManager();
     ServerConfigurationFactory sconf = context.getServerConfFactory();
 
     Table.ID tableId = Tables.getTableId(context, opts.getTableName());
@@ -391,7 +391,7 @@ private static void runTest(String desc, List<Test> tests, int numThreads,
     return tabletsToTest;
   }
 
-  private static List<FileRef> getTabletFiles(ClientContext context, KeyExtent ke)
+  private static List<FileRef> getTabletFiles(ServerContext context, KeyExtent ke)
       throws IOException {
     return new ArrayList<>(MetadataTableUtil.getDataFileSizes(ke, context).keySet());
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/MockReplicaSystem.java b/test/src/main/java/org/apache/accumulo/test/replication/MockReplicaSystem.java
index 1bc9f207f7..dbdab127fb 100644
--- a/test/src/main/java/org/apache/accumulo/test/replication/MockReplicaSystem.java
+++ b/test/src/main/java/org/apache/accumulo/test/replication/MockReplicaSystem.java
@@ -21,6 +21,7 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.protobuf.ProtobufUtil;
 import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.replication.ReplicaSystem;
 import org.apache.accumulo.server.replication.ReplicaSystemHelper;
 import org.apache.accumulo.server.replication.proto.Replication.Status;
@@ -85,7 +86,7 @@ public Status replicate(Path p, Status status, ReplicationTarget target,
   }
 
   @Override
-  public void configure(String configuration) {
+  public void configure(ServerContext context, String configuration) {
     if (StringUtils.isBlank(configuration)) {
       log.debug("No configuration, using default sleep of {}", sleep);
       return;


 
