Posted to commits@accumulo.apache.org by ed...@apache.org on 2023/02/08 21:32:54 UTC

[accumulo] branch main updated: remove upgrade code for versions before 2.1 (#3160)

This is an automated email from the ASF dual-hosted git repository.

edcoleman pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 94f4009429 remove upgrade code for versions before 2.1 (#3160)
94f4009429 is described below

commit 94f40094290c8a79cd42071dccc51e5a63ce1b2c
Author: EdColeman <de...@etcoleman.com>
AuthorDate: Wed Feb 8 16:32:48 2023 -0500

    remove upgrade code for versions before 2.1 (#3160)
    
    * Added new data version REMOVE_DEPRECATIONS_FOR_VERSION_3 (11)
    * Deletes upgraders and associated code for data versions before ROOT_TABLET_META_CHANGES (10)
    * Improves the upgrade failure message on an invalid data version by providing an Accumulo release hint.
    
    Co-authored-by: Christopher Tubbs <ct...@apache.org>
---
 .../accumulo/server/AccumuloDataVersion.java       |  50 +-
 .../org/apache/accumulo/server/ServerContext.java  |   4 +-
 .../server/conf/store/impl/ZooPropStore.java       |   7 -
 .../server/conf/util/ConfigPropertyUpgrader.java   | 144 ----
 .../server/conf/util/ConfigTransformer.java        | 443 -----------
 .../accumulo/server/conf/util/TransformToken.java  | 207 -----
 .../apache/accumulo/server/ServerContextTest.java  |   2 +-
 .../conf/util/ConfigPropertyUpgraderTest.java      | 102 ---
 .../java/org/apache/accumulo/manager/Manager.java  |   2 +
 .../manager/upgrade/PreUpgradeValidation.java      | 120 +++
 .../manager/upgrade/UpgradeCoordinator.java        |  15 +-
 .../accumulo/manager/upgrade/Upgrader8to9.java     |  44 --
 .../accumulo/manager/upgrade/Upgrader9to10.java    | 872 ---------------------
 .../manager/upgrade/RootFilesUpgradeTest.java      | 203 -----
 .../manager/upgrade/Upgrader9to10Test.java         | 405 ----------
 .../test/conf/util/ConfigTransformerIT.java        | 187 -----
 .../accumulo/test/conf/util/LegacyPropData.java    | 386 ---------
 .../accumulo/test/conf/util/TransformTokenIT.java  | 157 ----
 .../apache/accumulo/test/start/KeywordStartIT.java |   2 -
 .../test/upgrade/ConfigPropertyUpgraderIT.java     | 175 -----
 .../test/upgrade/GCUpgrade9to10TestIT.java         | 252 ------
 21 files changed, 166 insertions(+), 3613 deletions(-)

diff --git a/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java
index b80ac9039d..5b44913f11 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/AccumuloDataVersion.java
@@ -22,16 +22,16 @@ import java.util.Set;
 
 /**
  * Class representing the version of data stored in Accumulo.
- *
+ * <p>
  * This version is separate but related to the file specific version in
  * {@link org.apache.accumulo.core.file.rfile.RFile}. A version change to RFile will reflect a
  * version change to the AccumuloDataVersion. But a version change to the AccumuloDataVersion may
  * not affect the version number in RFile. For example, changes made to other parts of Accumulo that
  * affects how data is stored, like the metadata table, would change the AccumuloDataVersion number
  * here but not in RFile.
- *
+ * <p>
  * This number is stored in HDFS under {@link org.apache.accumulo.core.Constants#VERSION_DIR}.
- *
+ * <p>
  * This class is used for checking the version during server startup and upgrades.
  */
 public class AccumuloDataVersion {
@@ -47,22 +47,12 @@ public class AccumuloDataVersion {
    */
   public static final int ROOT_TABLET_META_CHANGES = 10;
 
-  /**
-   * version (9) reflects changes to crypto that resulted in RFiles and WALs being serialized
-   * differently in version 2.0.0. Also RFiles in 2.0.0 may have summary data.
-   */
-  public static final int CRYPTO_CHANGES = 9;
-
-  /**
-   * version (8) reflects changes to RFile index (ACCUMULO-1124) AND the change to WAL tracking in
-   * ZK in version 1.8.0
-   */
-  public static final int SHORTEN_RFILE_KEYS = 8;
-
   /**
    * Historic data versions
    *
    * <ul>
+   * <li>version (9) RFiles and wal crypto serialization changes. RFile summary data in 2.0.0</li>
+   * <li>version (8) RFile index (ACCUMULO-1124) and wal tracking in ZK in 1.8.0</li>
    * <li>version (7) also reflects the addition of a replication table in 1.7.0
    * <li>version (6) reflects the addition of a separate root table (ACCUMULO-1481) in 1.6.0 -
    * <li>version (5) moves delete file markers for the metadata table into the root tablet
@@ -81,6 +71,32 @@ public class AccumuloDataVersion {
     return CURRENT_VERSION;
   }
 
-  public static final Set<Integer> CAN_RUN =
-      Set.of(SHORTEN_RFILE_KEYS, CRYPTO_CHANGES, ROOT_TABLET_META_CHANGES, CURRENT_VERSION);
+  public static final Set<Integer> CAN_RUN = Set.of(ROOT_TABLET_META_CHANGES, CURRENT_VERSION);
+
+  /**
+   * Get the stored, current working version.
+   *
+   * @param context the server context
+   * @return the stored data version
+   */
+  public static int getCurrentVersion(ServerContext context) {
+    int cv =
+        context.getServerDirs().getAccumuloPersistentVersion(context.getVolumeManager().getFirst());
+    ServerContext.ensureDataVersionCompatible(cv);
+    return cv;
+  }
+
+  public static String oldestUpgradeableVersionName() {
+    return dataVersionToReleaseName(CAN_RUN.stream().mapToInt(x -> x).min().orElseThrow());
+  }
+
+  private static String dataVersionToReleaseName(final int version) {
+    switch (version) {
+      case ROOT_TABLET_META_CHANGES:
+        return "2.1.0";
+      case REMOVE_DEPRECATIONS_FOR_VERSION_3:
+        return "3.0.0";
+    }
+    throw new IllegalArgumentException("Unsupported data version " + version);
+  }
 }
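
As a hedged illustration (not part of this commit), a caller-side sketch of the narrowed CAN_RUN gate and the new release hint; 'context' is an assumed ServerContext:

    // Hypothetical caller; getCurrentVersion() fails fast when the stored
    // version is not in CAN_RUN.
    int stored = AccumuloDataVersion.getCurrentVersion(context);
    if (stored != AccumuloDataVersion.get()) {
      // stored == 10 (ROOT_TABLET_META_CHANGES): an upgrade to 11 will run
      System.out.println("Oldest upgradeable release: "
          + AccumuloDataVersion.oldestUpgradeableVersionName()); // "2.1.0"
    }
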
diff --git a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
index 03ab556923..e2286115c2 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/ServerContext.java
@@ -300,7 +300,9 @@ public class ServerContext extends ClientContext {
   public static void ensureDataVersionCompatible(int dataVersion) {
     if (!AccumuloDataVersion.CAN_RUN.contains(dataVersion)) {
       throw new IllegalStateException("This version of accumulo (" + Constants.VERSION
-          + ") is not compatible with files stored using data version " + dataVersion);
+          + ") is not compatible with files stored using data version " + dataVersion
+          + ". Please upgrade from " + AccumuloDataVersion.oldestUpgradeableVersionName()
+          + " or later.");
     }
   }
 
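Assuming the message format above, a hedged example of the improved failure mode for a data version that this commit removes from CAN_RUN:

    // Illustrative only: data version 9 is below ROOT_TABLET_META_CHANGES.
    try {
      ServerContext.ensureDataVersionCompatible(9);
    } catch (IllegalStateException e) {
      // "This version of accumulo (...) is not compatible with files stored
      // using data version 9. Please upgrade from 2.1.0 or later."
      System.err.println(e.getMessage());
    }
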
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/store/impl/ZooPropStore.java b/server/base/src/main/java/org/apache/accumulo/server/conf/store/impl/ZooPropStore.java
index aed65fdbbe..8151f36f1e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/store/impl/ZooPropStore.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/store/impl/ZooPropStore.java
@@ -38,8 +38,6 @@ import org.apache.accumulo.server.conf.store.PropCache;
 import org.apache.accumulo.server.conf.store.PropChangeListener;
 import org.apache.accumulo.server.conf.store.PropStore;
 import org.apache.accumulo.server.conf.store.PropStoreKey;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.util.ConfigTransformer;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import org.checkerframework.checker.nullness.qual.NonNull;
@@ -180,11 +178,6 @@ public class ZooPropStore implements PropStore, PropChangeListener {
       return props;
     }
 
-    if (propStoreKey instanceof SystemPropKey) {
-      return new ConfigTransformer(zrw, codec, propStoreWatcher).transform(propStoreKey,
-          propStoreKey.getPath(), false);
-    }
-
     throw new IllegalStateException(
         "Invalid request for " + propStoreKey + ", the property node does not exist");
   }
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgrader.java b/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgrader.java
deleted file mode 100644
index b6400e5667..0000000000
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgrader.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.conf.util;
-
-import static org.apache.accumulo.core.Constants.ZCONFIG;
-
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.data.NamespaceId;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.cli.ServerUtilOpts;
-import org.apache.accumulo.server.conf.codec.VersionedPropCodec;
-import org.apache.accumulo.server.conf.store.NamespacePropKey;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.TablePropKey;
-import org.apache.accumulo.server.conf.store.impl.PropStoreWatcher;
-import org.apache.accumulo.server.conf.store.impl.ReadyMonitor;
-import org.apache.accumulo.start.spi.KeywordExecutable;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.auto.service.AutoService;
-import com.google.common.annotations.VisibleForTesting;
-
-@AutoService(KeywordExecutable.class)
-public class ConfigPropertyUpgrader implements KeywordExecutable {
-
-  private static final Logger log = LoggerFactory.getLogger(ConfigPropertyUpgrader.class);
-
-  private final static VersionedPropCodec codec = VersionedPropCodec.getDefault();
-
-  public ConfigPropertyUpgrader() {}
-
-  public static void main(String[] args) throws Exception {
-    new ConfigPropertyUpgrader().execute(args);
-  }
-
-  @Override
-  public String keyword() {
-    return "config-upgrade";
-  }
-
-  @Override
-  public String description() {
-    return "converts properties store in ZooKeeper to 2.1 format";
-  }
-
-  @Override
-  public void execute(final String[] args) throws Exception {
-    ServerUtilOpts opts = new ServerUtilOpts();
-    opts.parseArgs(ConfigPropertyUpgrader.class.getName(), args);
-
-    ServerContext context = opts.getServerContext();
-
-    doUpgrade(context.getInstanceID(), context.getZooReaderWriter());
-  }
-
-  public void doUpgrade(final InstanceId instanceId, final ZooReaderWriter zrw) {
-
-    ReadyMonitor readyMonitor = new ReadyMonitor(ConfigPropertyUpgrader.class.getSimpleName(),
-        zrw.getSessionTimeout() * 2L);
-    PropStoreWatcher nullWatcher = new PropStoreWatcher(readyMonitor);
-
-    ConfigTransformer transformer = new ConfigTransformer(zrw, codec, nullWatcher);
-
-    upgradeSysProps(instanceId, transformer);
-    upgradeNamespaceProps(instanceId, zrw, transformer);
-    upgradeTableProps(instanceId, zrw, transformer);
-  }
-
-  @VisibleForTesting
-  void upgradeSysProps(final InstanceId instanceId, final ConfigTransformer transformer) {
-    log.info("Upgrade system config properties for {}", instanceId);
-    String legacyPath = ZooUtil.getRoot(instanceId) + ZCONFIG;
-    transformer.transform(SystemPropKey.of(instanceId), legacyPath, false);
-  }
-
-  @VisibleForTesting
-  void upgradeNamespaceProps(final InstanceId instanceId, final ZooReaderWriter zrw,
-      final ConfigTransformer transformer) {
-    String zkPathNamespaceBase = ZooUtil.getRoot(instanceId) + Constants.ZNAMESPACES;
-    try {
-      // sort is cosmetic - only improves readability and consistency in logs
-      Set<String> namespaces = new TreeSet<>(zrw.getChildren(zkPathNamespaceBase));
-      for (String namespace : namespaces) {
-        String legacyPath = zkPathNamespaceBase + "/" + namespace + Constants.ZCONF_LEGACY;
-        log.info("Upgrading namespace {} base path: {}", namespace, legacyPath);
-        transformer.transform(NamespacePropKey.of(instanceId, NamespaceId.of(namespace)),
-            legacyPath, true);
-      }
-    } catch (KeeperException ex) {
-      throw new IllegalStateException(
-          "Failed to read namespaces from ZooKeeper for path: " + zkPathNamespaceBase, ex);
-    } catch (InterruptedException ex) {
-      throw new IllegalStateException(
-          "Interrupted reading namespaces from ZooKeeper for path: " + zkPathNamespaceBase, ex);
-    }
-  }
-
-  @VisibleForTesting
-  void upgradeTableProps(final InstanceId instanceId, final ZooReaderWriter zrw,
-      ConfigTransformer transformer) {
-    String zkPathTableBase = ZooUtil.getRoot(instanceId) + Constants.ZTABLES;
-    try {
-      // sort is cosmetic - only improves readability and consistency in logs
-      Set<String> tables = new TreeSet<>(zrw.getChildren(zkPathTableBase));
-      for (String table : tables) {
-        String legacyPath = zkPathTableBase + "/" + table + Constants.ZCONF_LEGACY;
-        log.info("Upgrading table {} base path: {}", table, legacyPath);
-        transformer.transform(TablePropKey.of(instanceId, TableId.of(table)), legacyPath, true);
-      }
-    } catch (KeeperException ex) {
-      throw new IllegalStateException(
-          "Failed to read tables from ZooKeeper for path: " + zkPathTableBase, ex);
-    } catch (InterruptedException ex) {
-      throw new IllegalStateException(
-          "Interrupted reading tables from ZooKeeper for path: " + zkPathTableBase, ex);
-    }
-  }
-
-}
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigTransformer.java b/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigTransformer.java
deleted file mode 100644
index 53195c2db1..0000000000
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/util/ConfigTransformer.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.conf.util;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.MINUTES;
-import static java.util.concurrent.TimeUnit.SECONDS;
-
-import java.io.IOException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.conf.DeprecatedPropertyUtil;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.core.util.DurationFormat;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.Retry;
-import org.apache.accumulo.server.conf.codec.VersionedPropCodec;
-import org.apache.accumulo.server.conf.codec.VersionedProperties;
-import org.apache.accumulo.server.conf.store.PropStoreKey;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.impl.PropStoreWatcher;
-import org.apache.accumulo.server.conf.store.impl.ZooPropStore;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.checkerframework.checker.nullness.qual.NonNull;
-import org.checkerframework.checker.nullness.qual.Nullable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Read legacy properties (pre 2.1) from ZooKeeper and transform them into the single node format.
- * The encoded properties are stored in ZooKeeper and then the legacy property ZooKeeper nodes are
- * deleted.
- */
-public class ConfigTransformer {
-
-  private static final Logger log = LoggerFactory.getLogger(ConfigTransformer.class);
-
-  private final ZooReaderWriter zrw;
-  private final VersionedPropCodec codec;
-  private final PropStoreWatcher propStoreWatcher;
-  private final Retry retry;
-
-  /**
-   * Instantiate a transformer instance.
-   *
-   * @param zrw a ZooReaderWriter
-   * @param codec the codec used to encode to the single-node format.
-   * @param propStoreWatcher the watcher registered to receive future notifications of changes to
-   *        the encoded property node.
-   */
-  public ConfigTransformer(final ZooReaderWriter zrw, VersionedPropCodec codec,
-      final PropStoreWatcher propStoreWatcher) {
-    this.zrw = zrw;
-    this.codec = codec;
-    this.propStoreWatcher = propStoreWatcher;
-
-    // default - allow for a conservative max delay of about a minute
-    retry =
-        Retry.builder().maxRetries(15).retryAfter(250, MILLISECONDS).incrementBy(500, MILLISECONDS)
-            .maxWait(5, SECONDS).backOffFactor(1.75).logInterval(3, MINUTES).createRetry();
-
-  }
-
-  public ConfigTransformer(final ZooReaderWriter zrw, VersionedPropCodec codec,
-      final PropStoreWatcher propStoreWatcher, final Retry retry) {
-    this.zrw = zrw;
-    this.codec = codec;
-    this.propStoreWatcher = propStoreWatcher;
-    this.retry = retry;
-  }
-
-  /**
-   * Transform the properties for the provided prop cache key.
-   *
-   * @return the encoded properties.
-   */
-  public VersionedProperties transform(final PropStoreKey<?> propStoreKey, final String legacyPath,
-      final boolean deleteLegacyNode) {
-    VersionedProperties exists = checkNeedsTransform(propStoreKey);
-    if (exists != null) {
-      return exists;
-    }
-    TransformToken token = TransformToken.createToken(legacyPath, zrw);
-    return transform(propStoreKey, token, legacyPath, deleteLegacyNode);
-  }
-
-  // Allow external (mocked) TransformToken to be used
-  @VisibleForTesting
-  VersionedProperties transform(final PropStoreKey<?> propStoreKey, final TransformToken token,
-      final String legacyPath, final boolean deleteLegacyNode) {
-    log.trace("checking for legacy property upgrade transform for {}", propStoreKey);
-    VersionedProperties results;
-    Instant start = Instant.now();
-    try {
-
-      // check for node - just return if it exists.
-      results = checkNeedsTransform(propStoreKey);
-      if (results != null) {
-        return results;
-      }
-
-      while (!token.haveTokenOwnership()) {
-        try {
-          retry.useRetry();
-          retry.waitForNextAttempt(log, "transform property at " + propStoreKey.getPath());
-          // look and return node if created while trying to get the token.
-          log.trace("own the token - look for existing encoded node at: {}",
-              propStoreKey.getPath());
-          results = ZooPropStore.readFromZk(propStoreKey, propStoreWatcher, zrw);
-          if (results != null) {
-            log.trace(
-                "Found existing node with properties after getting token at {}. skipping legacy prop conversion - version: {}, timestamp: {}",
-                propStoreKey, results.getDataVersion(), results.getTimestamp());
-            return results;
-          }
-          // still does not exist - try again.
-          token.getTokenOwnership();
-        } catch (InterruptedException ex) {
-          Thread.currentThread().interrupt();
-          throw new IllegalStateException("Failed to hold transform token for " + propStoreKey, ex);
-        } catch (IllegalStateException ex) {
-          throw new IllegalStateException("Failed to hold transform token for " + propStoreKey, ex);
-        }
-      }
-
-      Set<LegacyPropNode> upgradeNodes = readLegacyProps(legacyPath);
-      if (upgradeNodes.size() == 0) {
-        log.trace("No existing legacy props {}, skipping conversion, writing default prop node",
-            propStoreKey);
-        return writeNode(propStoreKey, Map.of());
-      }
-
-      upgradeNodes = convertDeprecatedProps(propStoreKey, upgradeNodes);
-
-      results = writeConverted(propStoreKey, upgradeNodes);
-
-      if (results == null) {
-        throw new IllegalStateException("Could not create properties for " + propStoreKey);
-      }
-
-      // validate token still valid before deletion.
-      if (!token.validateToken()) {
-        throw new IllegalStateException(
-            "legacy conversion failed. Lost transform token for " + propStoreKey);
-      }
-
-      Pair<Integer,Integer> deleteCounts = deleteLegacyProps(upgradeNodes);
-      log.info("property transform for {} took {} ms, delete count: {}, error count: {}",
-          propStoreKey, new DurationFormat(Duration.between(start, Instant.now()).toMillis(), ""),
-          deleteCounts.getFirst(), deleteCounts.getSecond());
-
-      return results;
-
-    } catch (Exception ex) {
-      log.info("Issue on upgrading legacy properties for: " + propStoreKey, ex);
-    } finally {
-      token.releaseToken();
-      if (deleteLegacyNode) {
-        log.trace("Delete legacy property base node: {}", legacyPath);
-        try {
-          zrw.delete(legacyPath);
-        } catch (KeeperException.NotEmptyException ex) {
-          log.info("Delete for legacy prop node {} - not empty", legacyPath);
-        } catch (KeeperException | InterruptedException ex) {
-          Thread.currentThread().interrupt();
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * If the config node exists, return the properties, otherwise return null. ZooKeeper exceptions
-   * are ignored. Interrupt exceptions will be propagated as IllegalStateExceptions.
-   *
-   * @param propStoreKey the prop key that identifies the configuration node.
-   * @return the existing encoded properties if present, null if they do not exist.
-   */
-  private VersionedProperties checkNeedsTransform(PropStoreKey<?> propStoreKey) {
-    try { // check for node - just return if it exists.
-      VersionedProperties results = ZooPropStore.readFromZk(propStoreKey, propStoreWatcher, zrw);
-      if (results != null) {
-        log.trace(
-            "Found existing node with properties at {}. skipping legacy prop conversion - version: {}, timestamp: {}",
-            propStoreKey, results.getDataVersion(), results.getTimestamp());
-        return results;
-      }
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("Interrupted during zookeeper read", ex);
-    } catch (IOException | KeeperException ex) {
-      log.trace("node for {} not found for upgrade", propStoreKey);
-    }
-    return null;
-  }
-
-  private Set<LegacyPropNode> convertDeprecatedProps(PropStoreKey<?> propStoreKey,
-      Set<LegacyPropNode> upgradeNodes) {
-
-    if (!(propStoreKey instanceof SystemPropKey)) {
-      return upgradeNodes;
-    }
-
-    Set<LegacyPropNode> renamedNodes = new TreeSet<>();
-
-    for (LegacyPropNode original : upgradeNodes) {
-      var finalName = DeprecatedPropertyUtil.getReplacementName(original.getPropName(),
-          (log, replacement) -> log
-              .info("Automatically renaming deprecated property '{}' with its replacement '{}'"
-                  + " in ZooKeeper configuration upgrade.", original, replacement));
-      LegacyPropNode renamed = new LegacyPropNode(original.getPath(), finalName, original.getData(),
-          original.getNodeVersion());
-      renamedNodes.add(renamed);
-    }
-    return renamedNodes;
-  }
-
-  private @NonNull Set<LegacyPropNode> readLegacyProps(final String basePath) {
-
-    Set<LegacyPropNode> legacyProps = new TreeSet<>();
-
-    // strip leading slash
-    var tokenName = TransformToken.TRANSFORM_TOKEN.substring(1);
-
-    try {
-      List<String> childNames = zrw.getChildren(basePath);
-      for (String propName : childNames) {
-        log.trace("processing ZooKeeper child node: {} at path: {}", propName, basePath);
-        if (tokenName.equals(propName)) {
-          continue;
-        }
-
-        log.trace("Adding: {} to list for legacy conversion", propName);
-
-        var path = basePath + "/" + propName;
-        Stat stat = new Stat();
-        byte[] bytes = zrw.getData(path, stat);
-
-        try {
-          LegacyPropNode node;
-          if (stat.getDataLength() > 0) {
-            node = new LegacyPropNode(path, propName, new String(bytes, UTF_8), stat.getVersion());
-          } else {
-            node = new LegacyPropNode(path, propName, "", stat.getVersion());
-          }
-          legacyProps.add(node);
-        } catch (IllegalStateException ex) {
-          log.warn("Skipping invalid property at path " + path, ex);
-        }
-      }
-
-    } catch (KeeperException ex) {
-      throw new IllegalStateException("Failed to read legacy props due to ZooKeeper error", ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException(
-          "Failed to read legacy props due to interrupt read from ZooKeeper", ex);
-    }
-    return legacyProps;
-  }
-
-  private Pair<Integer,Integer> deleteLegacyProps(Set<LegacyPropNode> nodes) {
-    int deleteCount = 0;
-    int errorCount = 0;
-    for (LegacyPropNode n : nodes) {
-      try {
-        log.trace("Delete legacy prop at path: {}, data version: {}", n.getPath(),
-            n.getNodeVersion());
-        deleteCount++;
-        zrw.deleteStrict(n.getPath(), n.getNodeVersion());
-      } catch (InterruptedException ex) {
-        Thread.currentThread().interrupt();
-        throw new IllegalStateException("interrupt received during upgrade node clean-up", ex);
-      } catch (KeeperException ex) {
-        errorCount++;
-        log.info("Failed to delete node during upgrade clean-up", ex);
-      }
-    }
-    return new Pair<>(deleteCount, errorCount);
-  }
-
-  private @Nullable VersionedProperties writeConverted(final PropStoreKey<?> propStoreKey,
-      final Set<LegacyPropNode> nodes) {
-    final Map<String,String> props = new HashMap<>();
-    nodes.forEach(node -> props.put(node.getPropName(), node.getData()));
-
-    VersionedProperties vProps;
-    try {
-      vProps = writeNode(propStoreKey, props);
-    } catch (InterruptedException | KeeperException ex) {
-      if (ex instanceof InterruptedException) {
-        Thread.currentThread().interrupt();
-      }
-      throw new IllegalStateException(
-          "failed to create node for " + propStoreKey + " on conversion", ex);
-    }
-
-    if (!validateWrite(propStoreKey, vProps)) {
-      log.trace("Failed property conversion validation for: {}", propStoreKey);
-      // failed validation
-      return null;
-    }
-
-    return vProps;
-  }
-
-  private VersionedProperties writeNode(final PropStoreKey<?> propStoreKey,
-      final Map<String,String> props) throws InterruptedException, KeeperException {
-    VersionedProperties vProps;
-    try {
-      String path = propStoreKey.getPath();
-      log.trace("Writing converted properties to ZooKeeper path: {} for key: {}", path,
-          propStoreKey);
-      Stat currStat = zrw.getStatus(path);
-      if (currStat == null || currStat.getDataLength() == 0) {
-        // no node or node with no props stored
-        vProps = new VersionedProperties(props);
-        zrw.putPrivatePersistentData(path, codec.toBytes(vProps),
-            ZooUtil.NodeExistsPolicy.OVERWRITE);
-      }
-      return ZooPropStore.readFromZk(propStoreKey, propStoreWatcher, zrw);
-    } catch (IOException ex) {
-      throw new IllegalStateException(
-          "failed to create node for " + propStoreKey + " on conversion", ex);
-    }
-  }
-
-  private boolean validateWrite(final PropStoreKey<?> propStoreKey,
-      final VersionedProperties vProps) {
-    try {
-      Stat stat = zrw.getStatus(propStoreKey.getPath(), propStoreWatcher);
-      if (stat == null) {
-        throw new IllegalStateException(
-            "failed to get stat to validate created node for " + propStoreKey);
-      }
-      log.debug("Property conversion validation - version received: {}, version expected: {}",
-          stat.getVersion(), vProps.getDataVersion());
-      return stat.getVersion() == vProps.getDataVersion();
-    } catch (KeeperException ex) {
-      throw new IllegalStateException("failed to validate created node for " + propStoreKey, ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("failed to validate created node for " + propStoreKey, ex);
-    }
-  }
-
-  /**
-   * Immutable container for legacy ZooKeeper property node information.
-   */
-  private static class LegacyPropNode implements Comparable<LegacyPropNode> {
-    private final String path;
-    private final String propName;
-    private final String data;
-    private final int nodeVersion;
-
-    /**
-     * An immutable instance of legacy ZooKeeper property node information. It holds the property
-     * and the node stat for later comparison to enable detection of ZooKeeper node changes. If the
-     * legacy property name has been deprecated, the property is renamed and the conversion is noted
-     * in the log.
-     *
-     * @param path the ZooKeeper path
-     * @param propName the property name - if deprecated it will be stored as the updated name and
-     *        the conversion logged.
-     * @param data the property value
-     * @param nodeVersion the ZooKeeper stat data version.
-     */
-    public LegacyPropNode(@NonNull final String path, final String propName, final String data,
-        final int nodeVersion) {
-      this.path = path;
-      this.propName = propName;
-      this.data = data;
-      this.nodeVersion = nodeVersion;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public String getPropName() {
-      return propName;
-    }
-
-    public String getData() {
-      return data;
-    }
-
-    public int getNodeVersion() {
-      return nodeVersion;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-      LegacyPropNode that = (LegacyPropNode) o;
-      return path.equals(that.path);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(path);
-    }
-
-    @Override
-    public int compareTo(LegacyPropNode other) {
-      return path.compareTo(other.path);
-    }
-  }
-
-}
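
For orientation, a hedged miniature of the conversion the deleted ConfigTransformer performed, written against the plain ZooKeeper client; 'zk', 'legacyPath', 'propsNodePath', and the 'encode' helper are assumptions, not Accumulo API:

    // Read each legacy child node into a map, write one encoded node, then
    // delete the legacy children.
    Map<String,String> props = new TreeMap<>();
    for (String child : zk.getChildren(legacyPath, false)) {
      byte[] data = zk.getData(legacyPath + "/" + child, false, new Stat());
      props.put(child, data == null ? "" : new String(data, StandardCharsets.UTF_8));
    }
    zk.create(propsNodePath, encode(props), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    for (String child : zk.getChildren(legacyPath, false)) {
      zk.delete(legacyPath + "/" + child, -1); // -1 skips the version check
    }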
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/util/TransformToken.java b/server/base/src/main/java/org/apache/accumulo/server/conf/util/TransformToken.java
deleted file mode 100644
index 9241913995..0000000000
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/util/TransformToken.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.conf.util;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Arrays;
-import java.util.Objects;
-import java.util.UUID;
-
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.checkerframework.checker.nullness.qual.NonNull;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides a token used in property conversion. The token is used to limit the number of processes
- * that try to create a property node and transform the legacy property format to the 2.1 encoded
- * properties. Processes do not queue for a token (using a sequential node) - processes should look
- * for the token to exist, and if present wait and then periodically re-check for the property node
- * to be created by the process that created / has the token.
- * <p>
- * Features
- * <ul>
- * <li>Uses ephemeral node that will be removed if transform process terminates without
- * completing</li>
- * <li>Watcher not necessary - the existence of the node and uuid in data sufficient to detect
- * changes.</li>
- * </ul>
- */
-public class TransformToken {
-  public static final String TRANSFORM_TOKEN = "/transform_token";
-  private static final Logger log = LoggerFactory.getLogger(TransformToken.class);
-  private final TokenUUID tokenUUID = new TokenUUID();
-  private final String path;
-  private final ZooReaderWriter zrw;
-  private boolean haveToken = false;
-
-  private TransformToken(final @NonNull String basePath, final ZooReaderWriter zrw) {
-    path = basePath + TRANSFORM_TOKEN;
-    this.zrw = zrw;
-
-    boolean t = getTokenOwnership();
-    log.trace("created token - token held: {}", t);
-  }
-
-  /**
-   * Create a lock node in ZooKeeper using an ephemeral node. Will not throw an exception except on
-   * an interrupt. If the lock node is created, the returned lock will be locked. If another lock
-   * already exists, the lock is unlocked and the caller can decide either to wait for the resource
-   * to be created by the thread that created the lock, or to retry calling {@code lock} until it succeeds.
-   *
-   * @param path the parent node of the legacy properties and the associated property children
-   *        nodes.
-   * @param zrw a ZooReaderWriter
-   * @return a TransformToken instance.
-   * @throws IllegalStateException if the lock creation fails due to an underlying ZooKeeper
-   *         exception.
-   */
-  public static TransformToken createToken(final @NonNull String path, final ZooReaderWriter zrw) {
-    return new TransformToken(path, zrw);
-  }
-
-  /**
-   * Create and try to establish ownership (hold the token). Token ownership can be tested with
-   * {@link #haveTokenOwnership() haveTokenOwnership}
-   *
-   * @return true if able to get ownership, false otherwise.
-   */
-  public boolean getTokenOwnership() {
-    if (haveToken) {
-      return true;
-    }
-    try {
-      // existence check should be lighter-weight than failing on NODE_EXISTS exception
-      if (zrw.exists(path)) {
-        return false;
-      }
-      // if this completes this thread has created the lock
-      zrw.putEphemeralData(path, tokenUUID.asBytes());
-      log.trace("wrote property transform token: {} - {}", path, tokenUUID);
-      haveToken = true;
-      return true;
-    } catch (KeeperException ex) {
-      log.debug(
-          "Failed to write transform token for " + path + " another process may have created one",
-          ex);
-      return false;
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("Interrupted getting transform token", ex);
-    }
-  }
-
-  /**
-   * Return the token ownership status
-   *
-   * @return true if this instance has ownership of the token, false otherwise.
-   */
-  public boolean haveTokenOwnership() {
-    return haveToken;
-  }
-
-  /**
-   * Verify ownership is still valid while holding the token.
-   *
-   * @return true if token is still owned, false otherwise
-   */
-  public boolean validateToken() {
-    try {
-      byte[] readId = zrw.getData(path);
-      log.trace("validate token: read: {} - expected: {}", readId, tokenUUID);
-      return Arrays.equals(readId, tokenUUID.asBytes());
-    } catch (KeeperException ex) {
-      throw new IllegalStateException("Failed to validate token", ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("Interrupted while validating token", ex);
-    }
-  }
-
-  /**
-   * If the token was created by this instance, the uuid of this instance and the uuid stored in the
-   * ZooKeeper data will match.
-   */
-  public void releaseToken() {
-    try {
-      if (log.isTraceEnabled()) {
-        log.trace("releaseToken called - {} - exists in ZooKeeper: {}", path, zrw.exists(path));
-      }
-
-      Stat stat = new Stat();
-      byte[] readId = zrw.getData(path, stat);
-      if (!Arrays.equals(readId, tokenUUID.asBytes())) {
-        throw new IllegalStateException(
-            "tried to release a token that was not held by current thread");
-      }
-
-      if (log.isTraceEnabled()) {
-        log.trace("releaseToken read id: {} - exists: {}", readId, zrw.exists(path));
-      }
-
-      // make sure we are deleting the same node version just checked.
-      zrw.deleteStrict(path, stat.getVersion());
-    } catch (KeeperException ex) {
-      throw new IllegalStateException("Failed to release transform lock for " + path, ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("Interrupted getting transform token", ex);
-    }
-    haveToken = false;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    TransformToken that = (TransformToken) o;
-    return path.equals(that.path) && Arrays.equals(tokenUUID.asBytes(), that.tokenUUID.asBytes());
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(path, Arrays.hashCode(tokenUUID.asBytes()));
-  }
-
-  @Override
-  public String toString() {
-    return "TransformLock{ path='" + path + "', locked='" + haveToken + "' id=" + tokenUUID + "'}'";
-  }
-
-  private static class TokenUUID {
-    private final String id = UUID.randomUUID().toString();
-    private final byte[] idBytes = id.getBytes(UTF_8);
-
-    public byte[] asBytes() {
-      return idBytes;
-    }
-
-    @Override
-    public String toString() {
-      return "TransformToken{id='" + id + '}';
-    }
-  }
-}
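
The javadoc of the deleted TransformToken describes its coordination pattern; a hedged sketch of that ephemeral-node token with the plain ZooKeeper client ('zk' and the path are illustrative):

    // First creator owns the token; ZooKeeper removes it if the owner dies.
    byte[] myId = UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8);
    try {
      zk.create(legacyPath + "/transform_token", myId,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // Token held: perform the one-time transform, then delete the token.
    } catch (KeeperException.NodeExistsException e) {
      // Another process holds the token: wait, then re-check for the
      // converted property node instead of retrying the transform.
    }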
diff --git a/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
index 97a882d435..f01145a5b0 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/ServerContextTest.java
@@ -135,7 +135,7 @@ public class ServerContextTest {
     // ensure this fails with older versions; the oldest supported version is hard-coded here
     // to ensure we don't unintentionally break upgrade support; changing this should be a conscious
     // decision and this check will ensure we don't overlook it
-    final int oldestSupported = 8;
+    final int oldestSupported = AccumuloDataVersion.ROOT_TABLET_META_CHANGES;
     final int currentVersion = AccumuloDataVersion.get();
     IntConsumer shouldPass = ServerContext::ensureDataVersionCompatible;
     IntConsumer shouldFail = v -> assertThrows(IllegalStateException.class,
diff --git a/server/base/src/test/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgraderTest.java b/server/base/src/test/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgraderTest.java
deleted file mode 100644
index 929cdf04ce..0000000000
--- a/server/base/src/test/java/org/apache/accumulo/server/conf/util/ConfigPropertyUpgraderTest.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.server.conf.util;
-
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.data.NamespaceId;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.server.conf.codec.VersionedProperties;
-import org.apache.accumulo.server.conf.store.NamespacePropKey;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.TablePropKey;
-import org.junit.jupiter.api.Test;
-
-class ConfigPropertyUpgraderTest {
-
-  @Test
-  void upgradeSysProps() {
-
-    InstanceId iid = InstanceId.of(UUID.randomUUID());
-
-    ConfigPropertyUpgrader upgrader = new ConfigPropertyUpgrader();
-    ConfigTransformer transformer = createMock(ConfigTransformer.class);
-    expect(transformer.transform(SystemPropKey.of(iid), SystemPropKey.of(iid).getPath(), false))
-        .andReturn(new VersionedProperties()).once();
-
-    replay(transformer);
-    upgrader.upgradeSysProps(iid, transformer);
-    verify(transformer);
-  }
-
-  @Test
-  void upgradeNamespaceProps() throws Exception {
-
-    InstanceId iid = InstanceId.of(UUID.randomUUID());
-
-    ConfigPropertyUpgrader upgrader = new ConfigPropertyUpgrader();
-
-    ConfigTransformer transformer = createMock(ConfigTransformer.class);
-    String nsRoot = ZooUtil.getRoot(iid) + Constants.ZNAMESPACES;
-    expect(transformer.transform(NamespacePropKey.of(iid, NamespaceId.of("a")),
-        nsRoot + "/a" + Constants.ZCONF_LEGACY, true)).andReturn(new VersionedProperties()).once();
-    expect(transformer.transform(NamespacePropKey.of(iid, NamespaceId.of("b")),
-        nsRoot + "/b" + Constants.ZCONF_LEGACY, true)).andReturn(new VersionedProperties()).once();
-
-    ZooReaderWriter zrw = createMock(ZooReaderWriter.class);
-    expect(zrw.getChildren(anyString())).andReturn(List.of("a", "b")).once();
-
-    replay(transformer, zrw);
-    upgrader.upgradeNamespaceProps(iid, zrw, transformer);
-    verify(transformer, zrw);
-  }
-
-  @Test
-  void upgradeTableProps() throws Exception {
-
-    InstanceId iid = InstanceId.of(UUID.randomUUID());
-
-    ConfigPropertyUpgrader upgrader = new ConfigPropertyUpgrader();
-
-    ConfigTransformer transformer = createMock(ConfigTransformer.class);
-    String nsRoot = ZooUtil.getRoot(iid) + Constants.ZTABLES;
-    expect(transformer.transform(TablePropKey.of(iid, TableId.of("a")),
-        nsRoot + "/a" + Constants.ZCONF_LEGACY, true)).andReturn(new VersionedProperties()).once();
-    expect(transformer.transform(TablePropKey.of(iid, TableId.of("b")),
-        nsRoot + "/b" + Constants.ZCONF_LEGACY, true)).andReturn(new VersionedProperties()).once();
-
-    ZooReaderWriter zrw = createMock(ZooReaderWriter.class);
-    expect(zrw.getChildren(anyString())).andReturn(List.of("a", "b")).once();
-
-    replay(transformer, zrw);
-    upgrader.upgradeTableProps(iid, zrw, transformer);
-    verify(transformer, zrw);
-  }
-}
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
index b7a99ef9b0..9177f7fa1a 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java
@@ -109,6 +109,7 @@ import org.apache.accumulo.manager.metrics.ManagerMetrics;
 import org.apache.accumulo.manager.recovery.RecoveryManager;
 import org.apache.accumulo.manager.state.TableCounts;
 import org.apache.accumulo.manager.tableOps.TraceRepo;
+import org.apache.accumulo.manager.upgrade.PreUpgradeValidation;
 import org.apache.accumulo.manager.upgrade.UpgradeCoordinator;
 import org.apache.accumulo.server.AbstractServer;
 import org.apache.accumulo.server.HighlyAvailableService;
@@ -295,6 +296,7 @@ public class Manager extends AbstractServer
     }
 
     if (oldState != newState && (newState == ManagerState.HAVE_LOCK)) {
+      new PreUpgradeValidation().validate(getContext(), nextEvent);
       upgradeCoordinator.upgradeZookeeper(getContext(), nextEvent);
     }
 
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/PreUpgradeValidation.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/PreUpgradeValidation.java
new file mode 100644
index 0000000000..2449355dad
--- /dev/null
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/PreUpgradeValidation.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.manager.upgrade;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.manager.EventCoordinator;
+import org.apache.accumulo.server.AccumuloDataVersion;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZKUtil;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
+/**
+ * Provides checks that run before the upgraders to verify that the environment left by previous
+ * versions matches expectations. Checks include:
+ * <ul>
+ * <li>ACL validation of ZooKeeper nodes</li>
+ * </ul>
+ */
+public class PreUpgradeValidation {
+
+  private final static Logger log = LoggerFactory.getLogger(PreUpgradeValidation.class);
+
+  public void validate(final ServerContext context, final EventCoordinator eventCoordinator) {
+    int cv = AccumuloDataVersion.getCurrentVersion(context);
+    if (cv == AccumuloDataVersion.get()) {
+      log.debug("already at current data version: {}, skipping validation", cv);
+      return;
+    }
+    validateACLs(context);
+  }
+
+  private void validateACLs(ServerContext context) {
+
+    final AtomicBoolean aclErrorOccurred = new AtomicBoolean(false);
+    final ZooReaderWriter zrw = context.getZooReaderWriter();
+    final ZooKeeper zk = zrw.getZooKeeper();
+    final String rootPath = context.getZooKeeperRoot();
+    final Set<String> users = Set.of("accumulo", "anyone");
+
+    log.info("Starting validation on ZooKeeper ACLs");
+
+    try {
+      ZKUtil.visitSubTreeDFS(zk, rootPath, false, (rc, path, ctx, name) -> {
+        try {
+          final List<ACL> acls = zk.getACL(path, new Stat());
+          if (!hasAllPermissions(users, acls)) {
+            log.error(
+                "ZNode at {} does not have an ACL that allows accumulo to write to it. ZNode ACL will need to be modified. Current ACLs: {}",
+                path, acls);
+            aclErrorOccurred.set(true);
+          }
+        } catch (KeeperException | InterruptedException e) {
+          log.error("Error getting ACL for path: {}", path, e);
+          aclErrorOccurred.set(true);
+        }
+      });
+      if (aclErrorOccurred.get()) {
+        throw new RuntimeException(
+            "Upgrade precondition failed! ACLs will need to be modified for some ZooKeeper nodes. "
+                + "Check the log for specific failed paths, check ZooKeeper troubleshooting in user documentation "
+                + "for instructions on how to fix.");
+      }
+    } catch (KeeperException | InterruptedException e) {
+      throw new RuntimeException("Upgrade Failed! Error validating nodes under " + rootPath, e);
+    }
+    log.info("Successfully completed validation on ZooKeeper ACLs");
+  }
+
+  private static boolean hasAllPermissions(final Set<String> users, final List<ACL> acls) {
+    return acls.stream()
+        .anyMatch(a -> users.contains(extractAuthName(a)) && a.getPerms() == ZooDefs.Perms.ALL);
+  }
+
+  private static String extractAuthName(ACL acl) {
+    Objects.requireNonNull(acl, "provided ACL cannot be null");
+    try {
+      return acl.getId().getId().trim().split(":")[0];
+    } catch (Exception ex) {
+      log.debug("Invalid ACL passed, cannot parse id from '{}'", acl);
+      return "";
+    }
+  }
+
+  @SuppressFBWarnings(value = "DM_EXIT",
+      justification = "Want to immediately stop all threads on upgrade error")
+  private void fail(Exception e) {
+    log.error("FATAL: Error performing pre-upgrade checks", e);
+    System.exit(1);
+  }
+
+}
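
As a hedged illustration of what hasAllPermissions() accepts (the digest credential below is a placeholder): the check compares only the user part of the ACL id and requires Perms.ALL:

    // Passes: id's user part is "accumulo" and all permissions are granted.
    ACL ok = new ACL(ZooDefs.Perms.ALL, new Id("digest", "accumulo:<base64-hash>"));
    // Fails validation: matching user, but only READ is granted.
    ACL bad = new ACL(ZooDefs.Perms.READ, new Id("digest", "accumulo:<base64-hash>"));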
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
index 505c2831ea..c4a58dee55 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/UpgradeCoordinator.java
@@ -18,6 +18,8 @@
  */
 package org.apache.accumulo.manager.upgrade;
 
+import static org.apache.accumulo.server.AccumuloDataVersion.ROOT_TABLET_META_CHANGES;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
@@ -109,10 +111,9 @@ public class UpgradeCoordinator {
 
   private int currentVersion;
   // map of "current version" -> upgrader to next version.
-  private final Map<Integer,Upgrader> upgraders =
-      Collections.unmodifiableMap(new TreeMap<>(Map.of(AccumuloDataVersion.SHORTEN_RFILE_KEYS,
-          new Upgrader8to9(), AccumuloDataVersion.CRYPTO_CHANGES, new Upgrader9to10(),
-          AccumuloDataVersion.ROOT_TABLET_META_CHANGES, new Upgrader10to11())));
+  // Sorted so upgrades execute in order from the oldest supported data version to current
+  private final Map<Integer,Upgrader> upgraders = Collections
+      .unmodifiableMap(new TreeMap<>(Map.of(ROOT_TABLET_META_CHANGES, new Upgrader10to11())));
 
   private volatile UpgradeStatus status;
 
@@ -144,9 +145,7 @@ public class UpgradeCoordinator {
         "Not currently in a suitable state to do zookeeper upgrade %s", status);
 
     try {
-      int cv = context.getServerDirs()
-          .getAccumuloPersistentVersion(context.getVolumeManager().getFirst());
-      ServerContext.ensureDataVersionCompatible(cv);
+      int cv = AccumuloDataVersion.getCurrentVersion(context);
       this.currentVersion = cv;
 
       if (cv == AccumuloDataVersion.get()) {
@@ -275,7 +274,7 @@ public class UpgradeCoordinator {
         throw new AccumuloException("Aborting upgrade because there are"
             + " outstanding FATE transactions from a previous Accumulo version."
             + " You can start the tservers and then use the shell to delete completed "
-            + " transactions. If there are uncomplete transactions, you will need to roll"
+            + " transactions. If there are incomplete transactions, you will need to roll"
             + " back and fix those issues. Please see the following page for more information: "
             + " https://accumulo.apache.org/docs/2.x/troubleshooting/advanced#upgrade-issues");
       }
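
The sorted-map comment above implies stepwise execution from the oldest supported data version; a hedged sketch of that loop shape (illustrative, not the coordinator's actual control flow):

    // Apply each upgrader in version order until the current version is reached.
    for (int v = currentVersion; v < AccumuloDataVersion.get(); v++) {
      Upgrader upgrader = upgraders.get(v); // e.g. 10 -> new Upgrader10to11()
      upgrader.upgradeZookeeper(context);   // then upgradeRoot, upgradeMetadata
    }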
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
deleted file mode 100644
index 2250a51e0a..0000000000
--- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader8to9.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.manager.upgrade;
-
-import org.apache.accumulo.server.AccumuloDataVersion;
-import org.apache.accumulo.server.ServerContext;
-
-/**
- * See {@link AccumuloDataVersion#CRYPTO_CHANGES}
- */
-public class Upgrader8to9 implements Upgrader {
-
-  @Override
-  public void upgradeZookeeper(ServerContext context) {
-    // There is no action that needs to be taken for zookeeper
-  }
-
-  @Override
-  public void upgradeRoot(ServerContext context) {
-    // There is no action that needs to be taken for metadata
-  }
-
-  @Override
-  public void upgradeMetadata(ServerContext context) {
-    // There is no action that needs to be taken for metadata
-  }
-
-}
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader9to10.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader9to10.java
deleted file mode 100644
index cfe1b3795e..0000000000
--- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader9to10.java
+++ /dev/null
@@ -1,872 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.manager.upgrade;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET;
-import static org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET_GC_CANDIDATES;
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
-import static org.apache.accumulo.server.util.MetadataTableUtil.EMPTY_TEXT;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.BiConsumer;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.dataImpl.KeyExtent;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVIterator;
-import org.apache.accumulo.core.gc.ReferenceFile;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.TServerInstance;
-import org.apache.accumulo.core.metadata.TabletFile;
-import org.apache.accumulo.core.metadata.schema.Ample;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.DeletesSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.DeletesSection.SkewedKeyValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataTime;
-import org.apache.accumulo.core.metadata.schema.RootTabletMetadata;
-import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
-import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
-import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.store.TablePropKey;
-import org.apache.accumulo.server.conf.util.ConfigPropertyUpgrader;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.gc.AllVolumesDirectory;
-import org.apache.accumulo.server.gc.GcVolumeUtil;
-import org.apache.accumulo.server.metadata.RootGcCandidates;
-import org.apache.accumulo.server.metadata.TabletMutatorBase;
-import org.apache.accumulo.server.util.PropUtil;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.net.HostAndPort;
-
-/**
- * Handles upgrading from 2.0 to 2.1.
- * <ul>
- * <li><strong>Rename master properties (Issue
- * <a href="https://github.com/apache/accumulo/issues/1640">#1640</a>):</strong> Rename any
- * ZooKeeper system properties that start with "master." to the equivalent property starting with
- * "manager." instead (see the {@code renameOldMasterPropsinZK(ServerContext)} method). Note that
- * this change was part of a larger effort to replace references to master with manager. See issues
- * <a href="https://github.com/apache/accumulo/issues/1641">#1641</a>,
- * <a href="https://github.com/apache/accumulo/issues/1642">#1642</a>, and
- * <a href="https://github.com/apache/accumulo/issues/1643">#1643</a> as well.</li>
- * </ul>
- */
-public class Upgrader9to10 implements Upgrader {
-
-  private static final Logger log = LoggerFactory.getLogger(Upgrader9to10.class);
-
-  public static final String ZROOT_TABLET_LOCATION = ZROOT_TABLET + "/location";
-  public static final String ZROOT_TABLET_FUTURE_LOCATION = ZROOT_TABLET + "/future_location";
-  public static final String ZROOT_TABLET_LAST_LOCATION = ZROOT_TABLET + "/lastlocation";
-  public static final String ZROOT_TABLET_WALOGS = ZROOT_TABLET + "/walogs";
-  public static final String ZROOT_TABLET_CURRENT_LOGS = ZROOT_TABLET + "/current_logs";
-  public static final String ZROOT_TABLET_PATH = ZROOT_TABLET + "/dir";
-  public static final Value UPGRADED = SkewedKeyValue.NAME;
-  public static final String OLD_DELETE_PREFIX = "~del";
-
-  // effectively an 8MB batch size, since this is a count of chars at 2 bytes each
-  public static final long CANDIDATE_BATCH_SIZE = 4_000_000;
-
-  @Override
-  public void upgradeZookeeper(ServerContext context) {
-    validateACLs(context);
-    upgradePropertyStorage(context);
-    setMetaTableProps(context);
-    upgradeRootTabletMetadata(context);
-    createExternalCompactionNodes(context);
-    // special case where old files need to be deleted
-    dropSortedMapWALFiles(context);
-    createScanServerNodes(context);
-  }
-
-  private static String extractAuthName(ACL acl) {
-    Objects.requireNonNull(acl, "provided ACL cannot be null");
-    try {
-      return acl.getId().getId().trim().split(":")[0];
-    } catch (Exception ex) {
-      log.debug("Invalid ACL passed, cannot parse id from '{}'", acl);
-      return "";
-    }
-  }
-
-  private static boolean hasAllPermissions(final Set<String> users, final List<ACL> acls) {
-    return acls.stream()
-        .anyMatch(a -> users.contains(extractAuthName(a)) && a.getPerms() == ZooDefs.Perms.ALL);
-  }
-
-  private void validateACLs(ServerContext context) {
-
-    final AtomicBoolean aclErrorOccurred = new AtomicBoolean(false);
-    final ZooReaderWriter zrw = context.getZooReaderWriter();
-    final ZooKeeper zk = zrw.getZooKeeper();
-    final String rootPath = context.getZooKeeperRoot();
-    final Set<String> users = Set.of("accumulo", "anyone");
-
-    try {
-      ZKUtil.visitSubTreeDFS(zk, rootPath, false, (rc, path, ctx, name) -> {
-        try {
-          final List<ACL> acls = zk.getACL(path, new Stat());
-          if (!hasAllPermissions(users, acls)) {
-            log.error(
-                "ZNode at {} does not have an ACL that allows accumulo to write to it. ZNode ACL will need to be modified. Current ACLs: {}",
-                path, acls);
-            aclErrorOccurred.set(true);
-          }
-        } catch (KeeperException | InterruptedException e) {
-          log.error("Error getting ACL for path: {}", path, e);
-          aclErrorOccurred.set(true);
-        }
-      });
-      if (aclErrorOccurred.get()) {
-        throw new RuntimeException(
-            "Upgrade precondition failed! ACLs will need to be modified for some ZooKeeper nodes. "
-                + "Check the log for specific failed paths, check ZooKeeper troubleshooting in user documentation "
-                + "for instructions on how to fix.");
-      }
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException("Upgrade Failed! Error validating nodes under " + rootPath, e);
-    }
-  }
-
-  @Override
-  public void upgradeRoot(ServerContext context) {
-    upgradeRelativePaths(context, Ample.DataLevel.METADATA);
-    upgradeDirColumns(context, Ample.DataLevel.METADATA);
-    upgradeFileDeletes(context, Ample.DataLevel.METADATA);
-  }
-
-  @Override
-  public void upgradeMetadata(ServerContext context) {
-    upgradeRelativePaths(context, Ample.DataLevel.USER);
-    upgradeDirColumns(context, Ample.DataLevel.USER);
-    upgradeFileDeletes(context, Ample.DataLevel.USER);
-  }
-
-  /**
-   * Convert system properties (if necessary) and all table properties to a single node
-   */
-  private void upgradePropertyStorage(ServerContext context) {
-    log.info("Starting property conversion");
-    ConfigPropertyUpgrader configUpgrader = new ConfigPropertyUpgrader();
-    configUpgrader.doUpgrade(context.getInstanceID(), context.getZooReaderWriter());
-    log.info("Completed property conversion");
-  }
-
-  /**
-   * Setup properties for External compactions.
-   */
-  private void setMetaTableProps(ServerContext context) {
-    try {
-      // sets the compaction dispatcher props for the given table and service name
-      BiConsumer<TableId,String> setDispatcherProps =
-          (TableId tableId, String dispatcherService) -> {
-            var dispatcherPropsMap = Map.of(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
-                SimpleCompactionDispatcher.class.getName(),
-                Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", dispatcherService);
-            PropUtil.setProperties(context, TablePropKey.of(context, tableId), dispatcherPropsMap);
-          };
-
-      // root compaction props
-      setDispatcherProps.accept(RootTable.ID, "root");
-      // metadata compaction props
-      setDispatcherProps.accept(MetadataTable.ID, "meta");
-    } catch (IllegalStateException ex) {
-      throw new RuntimeException("Unable to set system table properties", ex);
-    }
-  }
-
-  private void createScanServerNodes(ServerContext context) {
-    final byte[] EMPTY_BYTE_ARRAY = new byte[0];
-    try {
-      context.getZooReaderWriter().putPersistentData(
-          context.getZooKeeperRoot() + Constants.ZSSERVERS, EMPTY_BYTE_ARRAY,
-          NodeExistsPolicy.SKIP);
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException("Unable to create scan server paths", e);
-    }
-  }
-
-  private void createExternalCompactionNodes(ServerContext context) {
-
-    final byte[] EMPTY_BYTE_ARRAY = new byte[0];
-    try {
-      context.getZooReaderWriter().putPersistentData(
-          context.getZooKeeperRoot() + Constants.ZCOORDINATOR, EMPTY_BYTE_ARRAY,
-          NodeExistsPolicy.SKIP);
-      context.getZooReaderWriter().putPersistentData(
-          context.getZooKeeperRoot() + Constants.ZCOORDINATOR_LOCK, EMPTY_BYTE_ARRAY,
-          NodeExistsPolicy.SKIP);
-      context.getZooReaderWriter().putPersistentData(
-          context.getZooKeeperRoot() + Constants.ZCOMPACTORS, EMPTY_BYTE_ARRAY,
-          NodeExistsPolicy.SKIP);
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException("Unable to create external compaction paths", e);
-    }
-  }
-
-  /**
-   * Improvements to the metadata and root tables were made in this version. See pull request
-   * <a href="https://github.com/apache/accumulo/pull/1174">#1174</a> for more details.
-   */
-  private void upgradeRootTabletMetadata(ServerContext context) {
-    String rootMetaSer = getFromZK(context, ZROOT_TABLET);
-
-    if (rootMetaSer == null || rootMetaSer.isEmpty()) {
-      String dir = getFromZK(context, ZROOT_TABLET_PATH);
-      List<LogEntry> logs = getRootLogEntries(context);
-
-      TServerInstance last = getLocation(context, ZROOT_TABLET_LAST_LOCATION);
-      TServerInstance future = getLocation(context, ZROOT_TABLET_FUTURE_LOCATION);
-      TServerInstance current = getLocation(context, ZROOT_TABLET_LOCATION);
-
-      UpgradeMutator tabletMutator = new UpgradeMutator(context);
-
-      tabletMutator.putPrevEndRow(RootTable.EXTENT.prevEndRow());
-
-      tabletMutator.putDirName(upgradeDirColumn(dir));
-
-      if (last != null) {
-        tabletMutator.putLocation(last, LocationType.LAST);
-      }
-
-      if (future != null) {
-        tabletMutator.putLocation(future, LocationType.FUTURE);
-      }
-
-      if (current != null) {
-        tabletMutator.putLocation(current, LocationType.CURRENT);
-      }
-
-      logs.forEach(tabletMutator::putWal);
-
-      Map<String,DataFileValue> files = cleanupRootTabletFiles(context.getVolumeManager(), dir);
-      files.forEach((path, dfv) -> tabletMutator.putFile(new TabletFile(new Path(path)), dfv));
-
-      tabletMutator.putTime(computeRootTabletTime(context, files.keySet()));
-
-      tabletMutator.mutate();
-    }
-
-    try {
-      context.getZooReaderWriter().putPersistentData(
-          context.getZooKeeperRoot() + ZROOT_TABLET_GC_CANDIDATES,
-          new RootGcCandidates().toJson().getBytes(UTF_8), NodeExistsPolicy.SKIP);
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-
-    // this operation must be idempotent, so deleting after updating is very important
-
-    delete(context, ZROOT_TABLET_CURRENT_LOGS);
-    delete(context, ZROOT_TABLET_FUTURE_LOCATION);
-    delete(context, ZROOT_TABLET_LAST_LOCATION);
-    delete(context, ZROOT_TABLET_LOCATION);
-    delete(context, ZROOT_TABLET_WALOGS);
-    delete(context, ZROOT_TABLET_PATH);
-  }
-
-  private static class UpgradeMutator extends TabletMutatorBase {
-
-    private final ServerContext context;
-
-    UpgradeMutator(ServerContext context) {
-      super(context, RootTable.EXTENT);
-      this.context = context;
-    }
-
-    @Override
-    public void mutate() {
-      Mutation mutation = getMutation();
-
-      try {
-        context.getZooReaderWriter().mutateOrCreate(
-            context.getZooKeeperRoot() + RootTable.ZROOT_TABLET, new byte[0], currVal -> {
-              // Earlier, it was checked that root tablet metadata did not exist. However, the
-              // earlier check does not handle race conditions. Race conditions are unexpected.
-              // This is a sanity check when making the update in ZK using compare and set. If this
-              // fails and it's not a bug, then it's likely some concurrency issue. For example,
-              // two managers concurrently running upgrade could cause this to fail.
-              Preconditions.checkState(currVal.length == 0,
-                  "Expected root tablet metadata to be empty!");
-              var rtm = new RootTabletMetadata();
-              rtm.update(mutation);
-              String json = rtm.toJson();
-              log.info("Upgrading root tablet metadata, writing following to ZK : \n {}", json);
-              return json.getBytes(UTF_8);
-            });
-      } catch (Exception e) {
-        throw new RuntimeException(e);
-      }
-
-    }
-
-  }
-
-  protected TServerInstance getLocation(ServerContext context, String relpath) {
-    String str = getFromZK(context, relpath);
-    if (str == null) {
-      return null;
-    }
-
-    String[] parts = str.split("[|]", 2);
-    HostAndPort address = HostAndPort.fromString(parts[0]);
-    if (parts.length > 1 && parts[1] != null && !parts[1].isEmpty()) {
-      return new TServerInstance(address, parts[1]);
-    } else {
-      // a 1.2 location specification: DO NOT WANT
-      return null;
-    }
-  }
-
-  static List<LogEntry> getRootLogEntries(ServerContext context) {
-
-    try {
-      ArrayList<LogEntry> result = new ArrayList<>();
-
-      ZooReaderWriter zoo = context.getZooReaderWriter();
-      String root = context.getZooKeeperRoot() + ZROOT_TABLET_WALOGS;
-      // there's a little race between getting the children and fetching
-      // the data. The log can be removed in between.
-      outer: while (true) {
-        result.clear();
-        for (String child : zoo.getChildren(root)) {
-          try {
-            @SuppressWarnings("removal")
-            LogEntry e = LogEntry.fromBytes(zoo.getData(root + "/" + child));
-            // upgrade from !0;!0<< -> +r<<
-            e = new LogEntry(RootTable.EXTENT, 0, e.filename);
-            result.add(e);
-          } catch (KeeperException.NoNodeException ex) {
-            // TODO I think this is a bug: it probably meant to continue to the while loop...
-            // this was probably a bug in the original code.
-            continue outer;
-          }
-        }
-        break;
-      }
-
-      return result;
-    } catch (KeeperException | InterruptedException | IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private String getFromZK(ServerContext context, String relpath) {
-    try {
-      byte[] data = context.getZooReaderWriter().getData(context.getZooKeeperRoot() + relpath);
-      if (data == null) {
-        return null;
-      }
-
-      return new String(data, UTF_8);
-    } catch (NoNodeException e) {
-      return null;
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void delete(ServerContext context, String relpath) {
-    try {
-      context.getZooReaderWriter().recursiveDelete(context.getZooKeeperRoot() + relpath,
-          NodeMissingPolicy.SKIP);
-    } catch (KeeperException | InterruptedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  MetadataTime computeRootTabletTime(ServerContext context, Collection<String> goodPaths) {
-
-    try {
-      long rtime = Long.MIN_VALUE;
-      for (String good : goodPaths) {
-        Path path = new Path(good);
-
-        FileSystem ns = context.getVolumeManager().getFileSystemByPath(path);
-        var tableConf = context.getTableConfiguration(RootTable.ID);
-        long maxTime = -1;
-        try (FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-            .forFile(path.toString(), ns, ns.getConf(), NoCryptoServiceFactory.NONE)
-            .withTableConfiguration(tableConf).seekToBeginning().build()) {
-          while (reader.hasTop()) {
-            maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
-            reader.next();
-          }
-        }
-        if (maxTime > rtime) {
-          rtime = maxTime;
-        }
-      }
-
-      if (rtime < 0) {
-        throw new IllegalStateException("Unexpected root tablet logical time " + rtime);
-      }
-
-      return new MetadataTime(rtime, TimeType.LOGICAL);
-    } catch (IOException e) {
-      throw new UncheckedIOException(e);
-    }
-  }
-
-  static Map<String,DataFileValue> cleanupRootTabletFiles(VolumeManager fs, String dir) {
-
-    try {
-      FileStatus[] files = fs.listStatus(new Path(dir));
-
-      Map<String,DataFileValue> goodFiles = new HashMap<>(files.length);
-
-      for (FileStatus file : files) {
-
-        String path = file.getPath().toString();
-        if (file.getPath().toUri().getScheme() == null) {
-          // depending on the behavior of HDFS, if listStatus does not return
-          // fully qualified volumes, then the path could switch to the
-          // default volume
-          throw new IllegalArgumentException("Require fully qualified paths " + file.getPath());
-        }
-
-        String filename = file.getPath().getName();
-
-        // check for an incomplete major compaction; this should only occur
-        // for the root tablet
-        if (filename.startsWith("delete+")) {
-          String expectedCompactedFile =
-              path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename.split("\\+")[1];
-          if (fs.exists(new Path(expectedCompactedFile))) {
-            // compaction finished, but did not finish deleting the compacted files, so delete it
-            if (!fs.deleteRecursively(file.getPath())) {
-              log.warn("Delete of file: {} returned false", file.getPath());
-            }
-            continue;
-          }
-          // compaction did not finish, so put files back
-
-          // reset path and filename for rest of loop
-          filename = filename.split("\\+", 3)[2];
-          path = path.substring(0, path.lastIndexOf("/delete+")) + "/" + filename;
-          Path src = file.getPath();
-          Path dst = new Path(path);
-
-          if (!fs.rename(src, dst)) {
-            throw new IOException("Rename " + src + " to " + dst + " returned false ");
-          }
-        }
-
-        if (filename.endsWith("_tmp")) {
-          log.warn("cleaning up old tmp file: {}", path);
-          if (!fs.deleteRecursively(file.getPath())) {
-            log.warn("Delete of tmp file: {} return false", file.getPath());
-          }
-
-          continue;
-        }
-
-        if (!filename.startsWith(Constants.MAPFILE_EXTENSION + "_")
-            && !FileOperations.getValidExtensions().contains(filename.split("\\.")[1])) {
-          log.error("unknown file in tablet: {}", path);
-          continue;
-        }
-
-        goodFiles.put(path, new DataFileValue(file.getLen(), 0));
-      }
-
-      return goodFiles;
-    } catch (IOException e) {
-      throw new UncheckedIOException(e);
-    }
-  }
-
-  /**
-   * Improve how Delete markers are stored. For more information see:
-   * <a href="https://github.com/apache/accumulo/issues/1043">#1043</a>
-   * <a href="https://github.com/apache/accumulo/pull/1366">#1366</a>
-   */
-  public void upgradeFileDeletes(ServerContext context, Ample.DataLevel level) {
-
-    String tableName = level.metaTable();
-    Ample ample = context.getAmple();
-
-    // find all deletes
-    try (BatchWriter writer = context.createBatchWriter(tableName)) {
-      log.info("looking for candidates in table {}", tableName);
-      Iterator<String> oldCandidates = getOldCandidates(context, tableName);
-      String upgradeProp =
-          context.getConfiguration().get(Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE);
-
-      while (oldCandidates.hasNext()) {
-        List<String> deletes = readCandidatesInBatch(oldCandidates);
-        log.info("found {} deletes to upgrade", deletes.size());
-        for (String olddelete : deletes) {
-          // create new formatted delete
-          log.trace("upgrading delete entry for {}", olddelete);
-
-          Path absolutePath = resolveRelativeDelete(olddelete, upgradeProp);
-          ReferenceFile updatedDel = switchToAllVolumes(absolutePath);
-
-          writer.addMutation(ample.createDeleteMutation(updatedDel));
-        }
-        writer.flush();
-        // if nothing was thrown, then we're good, so delete all of the old entries
-        log.info("upgrade processing completed, so deleting the old entries");
-        for (String olddelete : deletes) {
-          log.trace("deleting old entry for {}", olddelete);
-          writer.addMutation(deleteOldDeleteMutation(olddelete));
-        }
-        writer.flush();
-      }
-    } catch (TableNotFoundException | MutationsRejectedException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * If path of file to delete is a directory, change it to all volumes. See {@link GcVolumeUtil}.
-   * For example: A directory "hdfs://localhost:9000/accumulo/tables/5a/t-0005" with volume removed
-   * "tables/5a/t-0005" depth = 3 will be switched to "agcav:/tables/5a/t-0005". A file
-   * "hdfs://localhost:9000/accumulo/tables/5a/t-0005/A0012.rf" with volume removed
-   * "tables/5a/t-0005/A0012.rf" depth = 4 will be returned as is.
-   */
-  @VisibleForTesting
-  static ReferenceFile switchToAllVolumes(Path olddelete) {
-    Path pathNoVolume = Objects.requireNonNull(VolumeManager.FileType.TABLE.removeVolume(olddelete),
-        "Invalid delete marker. No volume in path: " + olddelete);
-
-    // a directory path with volume removed will have a depth of 3 like, "tables/5a/t-0005"
-    if (pathNoVolume.depth() == 3) {
-      String tabletDir = pathNoVolume.getName();
-      var tableId = TableId.of(pathNoVolume.getParent().getName());
-      // except bulk directories don't get an all volume prefix
-      if (pathNoVolume.getName().startsWith(Constants.BULK_PREFIX)) {
-        return new ReferenceFile(tableId, olddelete.toString());
-      } else {
-        return new AllVolumesDirectory(tableId, tabletDir);
-      }
-    } else {
-      // depth of 4 should be a file like, "tables/5a/t-0005/A0012.rf"
-      if (pathNoVolume.depth() == 4) {
-        Path tabletDirPath = pathNoVolume.getParent();
-        var tableId = TableId.of(tabletDirPath.getParent().getName());
-        return new ReferenceFile(tableId, olddelete.toString());
-      } else {
-        throw new IllegalStateException("Invalid delete marker: " + olddelete);
-      }
-    }
-  }
-
-  /**
-   * Return path of the file from old delete markers
-   */
-  private Iterator<String> getOldCandidates(ServerContext context, String tableName)
-      throws TableNotFoundException {
-    Range range = DeletesSection.getRange();
-    Scanner scanner = context.createScanner(tableName, Authorizations.EMPTY);
-    scanner.setRange(range);
-    return scanner.stream().filter(entry -> !entry.getValue().equals(UPGRADED))
-        .map(entry -> entry.getKey().getRow().toString().substring(OLD_DELETE_PREFIX.length()))
-        .iterator();
-  }
-
-  private List<String> readCandidatesInBatch(Iterator<String> candidates) {
-    long candidateLength = 0;
-    List<String> result = new ArrayList<>();
-    while (candidates.hasNext()) {
-      String candidate = candidates.next();
-      candidateLength += candidate.length();
-      result.add(candidate);
-      if (candidateLength > CANDIDATE_BATCH_SIZE) {
-        log.trace("List of delete candidates has exceeded the batch size"
-            + " threshold. Attempting to delete what has been gathered so far.");
-        break;
-      }
-    }
-    return result;
-  }
-
-  private Mutation deleteOldDeleteMutation(final String delete) {
-    Mutation m = new Mutation(OLD_DELETE_PREFIX + delete);
-    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-    return m;
-  }
-
-  /**
-   * Changes how volumes are stored in the metadata and makes Accumulo always call the volume
-   * chooser for new tablet files. These changes were done in
-   * <a href="https://github.com/apache/accumulo/pull/1389">#1389</a>
-   */
-  public void upgradeDirColumns(ServerContext context, Ample.DataLevel level) {
-    String tableName = level.metaTable();
-
-    try (Scanner scanner = context.createScanner(tableName, Authorizations.EMPTY);
-        BatchWriter writer = context.createBatchWriter(tableName)) {
-      DIRECTORY_COLUMN.fetch(scanner);
-
-      for (Entry<Key,Value> entry : scanner) {
-        Mutation m = new Mutation(entry.getKey().getRow());
-        DIRECTORY_COLUMN.put(m, new Value(upgradeDirColumn(entry.getValue().toString())));
-        writer.addMutation(m);
-      }
-    } catch (TableNotFoundException | AccumuloException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  public static String upgradeDirColumn(String dir) {
-    return new Path(dir).getName();
-  }
-
-  /**
-   * Remove all file entries containing relative paths and replace them with absolute URI paths.
-   * Absolute paths are resolved by prefixing relative paths with a volume configured by the user in
-   * the instance.volumes.upgrade.relative property, which is only used during an upgrade. If any
-   * relative paths are found and this property is not configured, or if any resolved absolute path
-   * does not correspond to a file that actually exists, the upgrade step fails and aborts without
-   * making changes. See the property {@link Property#INSTANCE_VOLUMES_UPGRADE_RELATIVE} and the
-   * pull request <a href="https://github.com/apache/accumulo/pull/1461">#1461</a>.
-   */
-  public static void upgradeRelativePaths(ServerContext context, Ample.DataLevel level) {
-    String tableName = level.metaTable();
-    VolumeManager fs = context.getVolumeManager();
-    String upgradeProp = context.getConfiguration().get(Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE);
-
-    // first pass check for relative paths - if any, check existence of the file path
-    // constructed from the upgrade property + relative path
-    if (checkForRelativePaths(context, fs, tableName, upgradeProp)) {
-      log.info("Relative Tablet File paths exist in {}, replacing with absolute using {}",
-          tableName, upgradeProp);
-    } else {
-      log.info("No relative paths found in {} during upgrade.", tableName);
-      return;
-    }
-
-    // second pass, create atomic mutations to replace the relative path
-    replaceRelativePaths(context, fs, tableName, upgradeProp);
-  }
-
-  /**
-   * Replace relative paths but only if the constructed absolute path exists on FileSystem
-   */
-  public static void replaceRelativePaths(AccumuloClient c, VolumeManager fs, String tableName,
-      String upgradeProperty) {
-    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-        BatchWriter writer = c.createBatchWriter(tableName)) {
-
-      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-      for (Entry<Key,Value> entry : scanner) {
-        Key key = entry.getKey();
-        String metaEntry = key.getColumnQualifier().toString();
-        if (!metaEntry.contains(":")) {
-          // found relative paths so get the property used to build the absolute paths
-          if (upgradeProperty == null || upgradeProperty.isBlank()) {
-            throw new IllegalArgumentException(
-                "Missing required property " + Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE.getKey());
-          }
-          Path relPath = resolveRelativePath(metaEntry, key);
-          Path absPath = new Path(upgradeProperty, relPath);
-          if (fs.exists(absPath)) {
-            log.debug("Changing Tablet File path from {} to {}", metaEntry, absPath);
-            Mutation m = new Mutation(key.getRow());
-            // add the new path
-            m.at().family(key.getColumnFamily()).qualifier(absPath.toString())
-                .visibility(key.getColumnVisibility()).put(entry.getValue());
-            // delete the old path
-            m.at().family(key.getColumnFamily()).qualifier(key.getColumnQualifierData().toArray())
-                .visibility(key.getColumnVisibility()).delete();
-            writer.addMutation(m);
-          } else {
-            throw new IllegalArgumentException(
-                "Relative Tablet file " + relPath + " not found at " + absPath);
-          }
-        }
-      }
-    } catch (MutationsRejectedException | TableNotFoundException e) {
-      throw new IllegalStateException(e);
-    } catch (IOException ioe) {
-      throw new UncheckedIOException(ioe);
-    }
-  }
-
-  /**
-   * Check if the table has any relative paths; return false if none are found. When a relative path is
-   * found, check existence of the file path constructed from the upgrade property + relative path
-   */
-  public static boolean checkForRelativePaths(AccumuloClient client, VolumeManager fs,
-      String tableName, String upgradeProperty) {
-    boolean hasRelatives = false;
-
-    try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
-      log.info("Looking for relative paths in {}", tableName);
-      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-      for (Entry<Key,Value> entry : scanner) {
-        Key key = entry.getKey();
-        String metaEntry = key.getColumnQualifier().toString();
-        if (!metaEntry.contains(":")) {
-          // found relative paths so verify the property used to build the absolute paths
-          hasRelatives = true;
-          if (upgradeProperty == null || upgradeProperty.isBlank()) {
-            throw new IllegalArgumentException(
-                "Missing required property " + Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE.getKey());
-          }
-          Path relPath = resolveRelativePath(metaEntry, key);
-          Path absPath = new Path(upgradeProperty, relPath);
-          if (!fs.exists(absPath)) {
-            throw new IllegalArgumentException("Tablet file " + relPath + " not found at " + absPath
-                + " using volume: " + upgradeProperty);
-          }
-        }
-      }
-    } catch (TableNotFoundException e) {
-      throw new IllegalStateException(e);
-    } catch (IOException e) {
-      throw new UncheckedIOException(e);
-    }
-
-    return hasRelatives;
-  }
-
-  /**
-   * Resolve old-style relative paths, returning Path of everything except volume and base
-   */
-  private static Path resolveRelativePath(String metadataEntry, Key key) {
-    String prefix = VolumeManager.FileType.TABLE.getDirectory() + "/";
-    if (metadataEntry.startsWith("../")) {
-      // resolve style "../2a/t-0003/C0004.rf"
-      return new Path(prefix + metadataEntry.substring(3));
-    } else {
-      // resolve style "/t-0003/C0004.rf"
-      TableId tableId = KeyExtent.fromMetaRow(key.getRow()).tableId();
-      return new Path(prefix + tableId.canonical() + metadataEntry);
-    }
-  }
-
-  /**
-   * Resolve old relative delete markers of the form /tableId/tabletDir/[file] to
-   * UpgradeVolume/tables/tableId/tabletDir/[file]
-   */
-  static Path resolveRelativeDelete(String oldDelete, String upgradeProperty) {
-    Path pathNoVolume = VolumeManager.FileType.TABLE.removeVolume(new Path(oldDelete));
-    Path pathToCheck = new Path(oldDelete);
-
-    // if the volume was removed properly, the path is absolute so return, otherwise
-    // it is a relative path so proceed with more checks
-    if (pathNoVolume != null) {
-      return pathToCheck;
-    }
-
-    // A relative path directory of the form "/tableId/tabletDir" will have depth == 2
-    // A relative path file of the form "/tableId/tabletDir/file" will have depth == 3
-    Preconditions.checkState(
-        oldDelete.startsWith("/") && (pathToCheck.depth() == 2 || pathToCheck.depth() == 3),
-        "Unrecognized relative delete marker {}", oldDelete);
-
-    // found relative paths so verify the property used to build the absolute paths
-    if (upgradeProperty == null || upgradeProperty.isBlank()) {
-      throw new IllegalArgumentException(
-          "Missing required property " + Property.INSTANCE_VOLUMES_UPGRADE_RELATIVE.getKey());
-    }
-    return new Path(upgradeProperty, VolumeManager.FileType.TABLE.getDirectory() + oldDelete);
-  }
-
-  /**
-   * Remove old temporary map files to prevent problems during recovery. Sorted recovery was updated
-   * to use RFiles instead of map files. So to prevent issues during tablet recovery, remove the old
-   * temporary map files and resort using RFiles. For more information see the following issues:
-   * <a href="https://github.com/apache/accumulo/issues/2117">#2117</a> and
-   * <a href="https://github.com/apache/accumulo/issues/2179">#2179</a>
-   */
-  static void dropSortedMapWALFiles(ServerContext context) {
-    VolumeManager vm = context.getVolumeManager();
-    for (String recoveryDir : context.getRecoveryDirs()) {
-      Path recoveryDirPath = new Path(recoveryDir);
-      try {
-        if (!vm.exists(recoveryDirPath)) {
-          log.info("There are no recovery files in {}", recoveryDir);
-          continue;
-        }
-        List<Path> directoriesToDrop = new ArrayList<>();
-        for (FileStatus walDir : vm.listStatus(recoveryDirPath)) {
-          // map files will be in a directory starting with "part"
-          Path walDirPath = walDir.getPath();
-          for (FileStatus dirOrFile : vm.listStatus(walDirPath)) {
-            if (dirOrFile.isDirectory()) {
-              directoriesToDrop.add(walDirPath);
-              break;
-            }
-          }
-        }
-        if (!directoriesToDrop.isEmpty()) {
-          log.info("Found {} old sorted map directories to delete.", directoriesToDrop.size());
-          for (Path dir : directoriesToDrop) {
-            log.info("Deleting everything in old sorted map directory: {}", dir);
-            vm.deleteRecursively(dir);
-          }
-        }
-      } catch (IOException ioe) {
-        throw new UncheckedIOException(ioe);
-      }
-    }
-  }
-}
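
One detail worth preserving from the removed Upgrader9to10: the delete-marker
conversion decided between a directory and a file purely by counting path elements
after stripping the volume, since "tables/<tableId>/<tabletDir>" has depth 3 and
"tables/<tableId>/<tabletDir>/<file>" has depth 4. A small sketch of that check
using Hadoop's Path (assumes hadoop-common on the classpath; the example paths
are made up):

    import org.apache.hadoop.fs.Path;

    public class DeleteMarkerDepthSketch {
      public static void main(String[] args) {
        Path dir = new Path("tables/5a/t-0005");            // tablet directory
        Path file = new Path("tables/5a/t-0005/A0012.rf");  // tablet file

        System.out.println(dir.depth());  // 3 -> converted to an all-volumes directory
        System.out.println(file.depth()); // 4 -> kept as a single-file reference
      }
    }
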
diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/RootFilesUpgradeTest.java b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/RootFilesUpgradeTest.java
deleted file mode 100644
index e90e4cee4e..0000000000
--- a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/RootFilesUpgradeTest.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.manager.upgrade;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.spi.fs.RandomVolumeChooser;
-import org.apache.accumulo.manager.WithTestNames;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.hadoop.fs.Path;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "paths not set by user input")
-public class RootFilesUpgradeTest extends WithTestNames {
-
-  @TempDir
-  private static File tempDir;
-
-  static void rename(VolumeManager fs, Path src, Path dst) throws IOException {
-    if (!fs.rename(src, dst)) {
-      throw new IOException("Rename " + src + " to " + dst + " returned false ");
-    }
-  }
-
-  private class TestWrapper {
-    File rootTabletDir;
-    Set<Path> oldDatafiles;
-    String compactName;
-    Path tmpDatafile;
-    Path newDatafile;
-    VolumeManager vm;
-    AccumuloConfiguration conf;
-
-    public void prepareReplacement(VolumeManager fs, Path location, Set<Path> oldDatafiles,
-        String compactName) throws IOException {
-      for (Path path : oldDatafiles) {
-        rename(fs, path, new Path(location + "/delete+" + compactName + "+" + path.getName()));
-      }
-    }
-
-    public void renameReplacement(VolumeManager fs, Path tmpDatafile, Path newDatafile)
-        throws IOException {
-      if (fs.exists(newDatafile)) {
-        throw new IllegalStateException("Target map file already exist " + newDatafile);
-      }
-
-      rename(fs, tmpDatafile, newDatafile);
-    }
-
-    public void finishReplacement(AccumuloConfiguration acuTableConf, VolumeManager fs,
-        Path location, Set<Path> oldDatafiles, String compactName) throws IOException {
-      // start deleting files, if we do not finish they will be cleaned
-      // up later
-      for (Path path : oldDatafiles) {
-        Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName());
-        if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile)) {
-          fs.deleteRecursively(deleteFile);
-        }
-      }
-    }
-
-    TestWrapper(VolumeManager vm, AccumuloConfiguration conf, String dirName, String compactName,
-        String... inputFiles) throws IOException {
-      this.vm = vm;
-      this.conf = conf;
-
-      rootTabletDir = new File(tempDir, dirName + "/accumulo/tables/+r/root_tablet");
-      assertTrue(rootTabletDir.mkdirs() || rootTabletDir.isDirectory());
-      oldDatafiles = new HashSet<>();
-      for (String filename : inputFiles) {
-        File file = new File(rootTabletDir, filename);
-        assertTrue(file.createNewFile());
-        oldDatafiles.add(new Path(file.toURI()));
-      }
-
-      this.compactName = compactName;
-
-      File tmpFile = new File(rootTabletDir, compactName + "_tmp");
-      assertTrue(tmpFile.createNewFile());
-      tmpDatafile = new Path(tmpFile.toURI());
-
-      newDatafile = new Path(new File(rootTabletDir, compactName).toURI());
-    }
-
-    void prepareReplacement() throws IOException {
-      prepareReplacement(vm, new Path(rootTabletDir.toURI()), oldDatafiles, compactName);
-    }
-
-    void renameReplacement() throws IOException {
-      renameReplacement(vm, tmpDatafile, newDatafile);
-    }
-
-    public void finishReplacement() throws IOException {
-      finishReplacement(conf, vm, new Path(rootTabletDir.toURI()), oldDatafiles, compactName);
-    }
-
-    public Collection<String> cleanupReplacement(String... expectedFiles) throws IOException {
-      Collection<String> ret =
-          Upgrader9to10.cleanupRootTabletFiles(vm, rootTabletDir.toString()).keySet();
-
-      HashSet<String> expected = new HashSet<>();
-      for (String efile : expectedFiles) {
-        expected.add(new File(rootTabletDir, efile).toURI().toString());
-      }
-
-      assertEquals(expected, new HashSet<>(ret));
-
-      return ret;
-    }
-
-    public void assertFiles(String... files) {
-      HashSet<String> actual = new HashSet<>();
-      File[] children = rootTabletDir.listFiles();
-      if (children != null) {
-        for (File file : children) {
-          actual.add(file.getName());
-        }
-      }
-
-      HashSet<String> expected = new HashSet<>();
-      expected.addAll(Arrays.asList(files));
-
-      assertEquals(expected, actual);
-    }
-  }
-
-  @Test
-  public void testFileReplacement() throws IOException {
-
-    ConfigurationCopy conf = new ConfigurationCopy();
-    conf.set(Property.INSTANCE_VOLUMES, "file:///");
-    conf.set(Property.GENERAL_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
-
-    try (var vm = VolumeManagerImpl.getLocalForTesting("file:///")) {
-
-      String[] uniqueDirNames = getUniqueNames(4);
-
-      TestWrapper wrapper =
-          new TestWrapper(vm, conf, uniqueDirNames[0], "A00004.rf", "A00002.rf", "F00003.rf");
-      wrapper.prepareReplacement();
-      wrapper.renameReplacement();
-      wrapper.finishReplacement();
-      wrapper.assertFiles("A00004.rf");
-
-      wrapper = new TestWrapper(vm, conf, uniqueDirNames[1], "A00004.rf", "A00002.rf", "F00003.rf");
-      wrapper.prepareReplacement();
-      wrapper.cleanupReplacement("A00002.rf", "F00003.rf");
-      wrapper.assertFiles("A00002.rf", "F00003.rf");
-
-      wrapper = new TestWrapper(vm, conf, uniqueDirNames[2], "A00004.rf", "A00002.rf", "F00003.rf");
-      wrapper.prepareReplacement();
-      wrapper.renameReplacement();
-      wrapper.cleanupReplacement("A00004.rf");
-      wrapper.assertFiles("A00004.rf");
-
-      wrapper = new TestWrapper(vm, conf, uniqueDirNames[3], "A00004.rf", "A00002.rf", "F00003.rf");
-      wrapper.prepareReplacement();
-      wrapper.renameReplacement();
-      wrapper.finishReplacement();
-      wrapper.cleanupReplacement("A00004.rf");
-      wrapper.assertFiles("A00004.rf");
-    }
-  }
-
-  public String[] getUniqueNames(int numOfNames) {
-    String[] result = new String[numOfNames];
-    for (int i = 0; i < result.length; i++) {
-      result[i] = testName() + i;
-    }
-    return result;
-  }
-}
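
The removed RootFilesUpgradeTest exercised the crash-safe file replacement protocol
that cleanupRootTabletFiles in the removed upgrader relied on: files being replaced
are first renamed to "delete+<compactName>+<original>" markers, the "<compactName>_tmp"
output is renamed into place, and the markers are then removed. On recovery, both
names can be parsed back out of a marker. A standalone sketch of that naming scheme
(the file names are made up for illustration):

    public class RootFileMarkerSketch {
      public static void main(String[] args) {
        String compactName = "A00004.rf"; // replacement file produced by the compaction
        String oldFile = "F00003.rf";     // one of the files being replaced

        // marker name used while the replacement is in flight
        String marker = "delete+" + compactName + "+" + oldFile;

        String[] parts = marker.split("\\+", 3);
        System.out.println(parts[1]); // A00004.rf - if present, finish deleting markers
        System.out.println(parts[2]); // F00003.rf - otherwise rename the marker back
      }
    }
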
diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader9to10Test.java b/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader9to10Test.java
deleted file mode 100644
index a1ad4415e1..0000000000
--- a/server/manager/src/test/java/org/apache/accumulo/manager/upgrade/Upgrader9to10Test.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.manager.upgrade;
-
-import static org.apache.accumulo.core.Constants.BULK_PREFIX;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.stream.Collectors;
-
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.ColumnUpdate;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.gc.ReferenceFile;
-import org.apache.accumulo.core.metadata.schema.Ample;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.volume.Volume;
-import org.apache.accumulo.core.volume.VolumeImpl;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.gc.AllVolumesDirectory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class Upgrader9to10Test {
-  private static Logger log = LoggerFactory.getLogger(Upgrader9to10Test.class);
-
-  private static final String VOL_PROP = "hdfs://nn1:8020/accumulo";
-  private static final TableId tableId5a = TableId.of("5a");
-
-  @Test
-  public void testSwitchRelativeDeletes() {
-    Path resolved = Upgrader9to10.resolveRelativeDelete("/5a/t-0005", VOL_PROP);
-    assertEquals(new Path(VOL_PROP + "/tables/5a/t-0005"), resolved);
-    var allVolumesDir = new AllVolumesDirectory(tableId5a, "t-0005");
-    var ref1 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(allVolumesDir, ref1);
-
-    resolved = Upgrader9to10.resolveRelativeDelete("/5a/" + BULK_PREFIX + "0005", VOL_PROP);
-    assertEquals(new Path(VOL_PROP + "/tables/5a/" + BULK_PREFIX + "0005"), resolved);
-    ref1 = new ReferenceFile(tableId5a, VOL_PROP + "/tables/5a/" + BULK_PREFIX + "0005");
-    var ref2 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(ref1, ref2);
-
-    resolved = Upgrader9to10.resolveRelativeDelete("/5a/t-0005/F0009.rf", VOL_PROP);
-    assertEquals(new Path(VOL_PROP + "/tables/5a/t-0005/F0009.rf"), resolved);
-    ref1 = new ReferenceFile(tableId5a, VOL_PROP + "/tables/5a/t-0005/F0009.rf");
-    ref2 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(ref1, ref2);
-  }
-
-  private void compareReferences(ReferenceFile ref1, ReferenceFile ref2) {
-    assertEquals(ref1.getMetadataEntry(), ref2.getMetadataEntry());
-    assertEquals(ref1.tableId, ref2.tableId);
-  }
-
-  @Test
-  public void testBadRelativeDeleteTooShort() {
-    assertThrows(IllegalStateException.class,
-        () -> Upgrader9to10.resolveRelativeDelete("/5a", VOL_PROP));
-  }
-
-  @Test
-  public void testBadRelativeDeleteTooLong() throws Exception {
-    assertThrows(IllegalStateException.class,
-        () -> Upgrader9to10.resolveRelativeDelete("/5a/5a/t-0005/F0009.rf", VOL_PROP));
-  }
-
-  @Test
-  public void testSwitchAllVolumes() {
-    Path resolved = Upgrader9to10
-        .resolveRelativeDelete("hdfs://localhost:9000/accumulo/tables/5a/t-0005", VOL_PROP);
-    var allVolumesDir = new AllVolumesDirectory(tableId5a, "t-0005");
-    var ref1 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(allVolumesDir, ref1);
-
-    resolved = Upgrader9to10.resolveRelativeDelete(
-        "hdfs://localhost:9000/accumulo/tables/5a/" + BULK_PREFIX + "0005", VOL_PROP);
-    ref1 = new ReferenceFile(tableId5a,
-        "hdfs://localhost:9000/accumulo/tables/5a/" + BULK_PREFIX + "0005");
-    var ref2 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(ref1, ref2);
-
-    resolved = Upgrader9to10.resolveRelativeDelete(
-        "hdfs://localhost:9000/accumulo/tables/5a/t-0005/C0009.rf", VOL_PROP);
-    ref1 = new ReferenceFile(tableId5a, "hdfs://localhost:9000/accumulo/tables/5a/t-0005/C0009.rf");
-    ref2 = Upgrader9to10.switchToAllVolumes(resolved);
-    compareReferences(ref1, ref2);
-  }
-
-  @Test
-  public void testUpgradeDir() {
-    assertEquals("t-0005",
-        Upgrader9to10.upgradeDirColumn("hdfs://localhost:9000/accumulo/tables/5a/t-0005"));
-    assertEquals("t-0005", Upgrader9to10.upgradeDirColumn("../5a/t-0005"));
-    assertEquals("t-0005", Upgrader9to10.upgradeDirColumn("/t-0005"));
-    assertEquals("t-0005", Upgrader9to10.upgradeDirColumn("t-0005"));
-  }
-
-  String tableName = Ample.DataLevel.USER.metaTable();
-  String volumeUpgrade = "file:///accumulo";
-
-  // mock objects for testing relative path replacement
-  private void setupMocks(AccumuloClient c, VolumeManager fs, SortedMap<Key,Value> map,
-      List<Mutation> results) throws Exception {
-    Scanner scanner = createMock(Scanner.class);
-    // buffer all the mutations that are created so we can verify they are correct
-    BatchWriter writer = new BatchWriter() {
-      List<Mutation> buffer = new ArrayList<>();
-
-      @Override
-      public void addMutation(Mutation m) throws MutationsRejectedException {
-        buffer.add(m);
-      }
-
-      @Override
-      public void addMutations(Iterable<Mutation> iterable) throws MutationsRejectedException {
-        iterable.forEach(buffer::add);
-      }
-
-      @Override
-      public void flush() throws MutationsRejectedException {}
-
-      @Override
-      // simulate the close by adding all to results and preventing any more adds
-      public void close() throws MutationsRejectedException {
-        results.addAll(buffer);
-        buffer = null;
-      }
-    };
-
-    expect(c.createScanner(tableName, Authorizations.EMPTY)).andReturn(scanner).anyTimes();
-    expect(c.createBatchWriter(tableName)).andReturn(writer).anyTimes();
-    expect(scanner.iterator()).andReturn(map.entrySet().iterator()).anyTimes();
-
-    // void methods
-    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-    expectLastCall().anyTimes();
-    scanner.close();
-    expectLastCall().anyTimes();
-
-    replay(c, fs, scanner);
-  }
-
-  @Test
-  public void noRelativePaths() throws Exception {
-    VolumeManager fs = createMock(VolumeManager.class);
-    AccumuloClient c = createMock(AccumuloClient.class);
-    expect(fs.exists(anyObject())).andReturn(true).anyTimes();
-
-    SortedMap<Key,Value> map = new TreeMap<>();
-    map.put(new Key("1<", "file", "hdfs://nn1:8020/accumulo/tables/1/default_tablet/A000001c.rf"),
-        new Value());
-    map.put(new Key("1<", "file", "hdfs://nn1:8020/accumulo/tables/1/default_tablet/F000001m.rf"),
-        new Value());
-    map.put(new Key("1<", "file", "file://nn1:8020/accumulo/tables/1/t-0005/F000004x.rf"),
-        new Value());
-    map.put(new Key("1<", "file", "file://volume23:8000/accumulo/tables/1/t-1234/F0000054.rf"),
-        new Value());
-
-    List<Mutation> results = new ArrayList<>();
-
-    setupMocks(c, fs, map, results);
-    assertFalse(Upgrader9to10.checkForRelativePaths(c, fs, tableName, volumeUpgrade),
-        "Invalid Relative path check");
-    assertTrue(results.isEmpty());
-  }
-
-  @Test
-  public void filesDontExistAfterReplacingRelatives() throws Exception {
-    AccumuloClient c = createMock(AccumuloClient.class);
-    VolumeManager fs = createMock(VolumeManager.class);
-    SortedMap<Key,Value> map = new TreeMap<>();
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/A000001c.rf"), new Value("1"));
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/F000001m.rf"), new Value("2"));
-
-    expect(fs.exists(anyObject(Path.class))).andReturn(false).anyTimes();
-
-    setupMocks(c, fs, map, new ArrayList<>());
-    assertThrows(IllegalArgumentException.class,
-        () -> Upgrader9to10.checkForRelativePaths(c, fs, tableName, volumeUpgrade));
-  }
-
-  @Test
-  public void missingUpgradeRelativeProperty() throws Exception {
-    AccumuloClient c = createMock(AccumuloClient.class);
-    VolumeManager fs = createMock(VolumeManager.class);
-    SortedMap<Key,Value> map = new TreeMap<>();
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/A000001c.rf"), new Value("1"));
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/F000001m.rf"), new Value("2"));
-
-    expect(fs.exists(anyObject(Path.class))).andReturn(false).anyTimes();
-
-    setupMocks(c, fs, map, new ArrayList<>());
-    assertThrows(IllegalArgumentException.class,
-        () -> Upgrader9to10.checkForRelativePaths(c, fs, tableName, ""));
-  }
-
-  @Test
-  public void replaceRelatives() throws Exception {
-    AccumuloClient c = createMock(AccumuloClient.class);
-    VolumeManager fs = createMock(VolumeManager.class);
-    expect(fs.exists(anyObject())).andReturn(true).anyTimes();
-
-    SortedMap<Key,Value> map = new TreeMap<>();
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/A000001c.rf"), new Value("1"));
-    map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/F000001m.rf"), new Value("2"));
-    map.put(new Key("1b;row_000050", "file", "../1b/t-000008t/F000004x.rf"), new Value("3"));
-    map.put(new Key("1b;row_000050", "file", "/t-000008t/F0000054.rf"), new Value("4"));
-    map.put(new Key("1b<", "file", "../1b/default_tablet/A000001c.rf"), new Value("1"));
-    map.put(new Key("1b<", "file", "../1b/default_tablet/F000001m.rf"), new Value("2"));
-    map.put(new Key("1b<", "file", "../1b/t-000008t/F000004x.rf"), new Value("3"));
-    map.put(new Key("1b<", "file", "/t-000008t/F0000054.rf"), new Value("4"));
-    map.put(new Key("1b<", "file", "hdfs://nn1:8020/accumulo/tables/1b/t-000008t/A0000098.rf"),
-        new Value("5"));
-    map.put(new Key("1b<", "file", "hdfs://nn1:8020/accumulo/tables/1b/t-000008t/F0000098.rf"),
-        new Value("5"));
-
-    List<Mutation> expected = new ArrayList<>();
-    expected.add(replaceMut("1b;row_000050", "file:/accumulo/tables/1b/default_tablet/A000001c.rf",
-        "1", "../1b/default_tablet/A000001c.rf"));
-    expected.add(replaceMut("1b;row_000050", "file:/accumulo/tables/1b/default_tablet/F000001m.rf",
-        "2", "../1b/default_tablet/F000001m.rf"));
-    expected.add(replaceMut("1b;row_000050", "file:/accumulo/tables/1b/t-000008t/F000004x.rf", "3",
-        "../1b/t-000008t/F000004x.rf"));
-    expected.add(replaceMut("1b;row_000050", "file:/accumulo/tables/1b/t-000008t/F0000054.rf", "4",
-        "/t-000008t/F0000054.rf"));
-    expected.add(replaceMut("1b<", "file:/accumulo/tables/1b/default_tablet/A000001c.rf", "1",
-        "../1b/default_tablet/A000001c.rf"));
-    expected.add(replaceMut("1b<", "file:/accumulo/tables/1b/default_tablet/F000001m.rf", "2",
-        "../1b/default_tablet/F000001m.rf"));
-    expected.add(replaceMut("1b<", "file:/accumulo/tables/1b/t-000008t/F000004x.rf", "3",
-        "../1b/t-000008t/F000004x.rf"));
-    expected.add(replaceMut("1b<", "file:/accumulo/tables/1b/t-000008t/F0000054.rf", "4",
-        "/t-000008t/F0000054.rf"));
-
-    List<Mutation> results = new ArrayList<>();
-
-    setupMocks(c, fs, map, results);
-    Upgrader9to10.replaceRelativePaths(c, fs, tableName, volumeUpgrade);
-    verifyPathsReplaced(expected, results);
-  }
-
-  @Test
-  public void normalizeVolume() throws Exception {
-    String uglyVolume = "hdfs://nn.somewhere.com:86753/accumulo/blah/.././/bad/bad2/../.././/////";
-
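-    // the volume string above contains "." and ".." segments and duplicate slashes;
-    // replaceRelativePaths should normalize it before qualifying the relative paths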
-    AccumuloClient c = createMock(AccumuloClient.class);
-    VolumeManager fs = createMock(VolumeManager.class);
-    expect(fs.exists(anyObject())).andReturn(true).anyTimes();
-    SortedMap<Key,Value> map = new TreeMap<>();
-    map.put(new Key("1b<", "file", "../1b/t-000008t/F000004x.rf"), new Value("1"));
-    map.put(new Key("1b<", "file", "/t-000008t/F0000054.rf"), new Value("2"));
-    List<Mutation> results = new ArrayList<>();
-    List<Mutation> expected = new ArrayList<>();
-    expected.add(
-        replaceMut("1b<", "hdfs://nn.somewhere.com:86753/accumulo/tables/1b/t-000008t/F000004x.rf",
-            "1", "../1b/t-000008t/F000004x.rf"));
-    expected.add(
-        replaceMut("1b<", "hdfs://nn.somewhere.com:86753/accumulo/tables/1b/t-000008t/F0000054.rf",
-            "2", "/t-000008t/F0000054.rf"));
-
-    setupMocks(c, fs, map, results);
-    Upgrader9to10.replaceRelativePaths(c, fs, tableName, uglyVolume);
-    verifyPathsReplaced(expected, results);
-  }
-
-  private Mutation replaceMut(String row, String cq, String val, String delete) {
-    Mutation m = new Mutation(row);
-    m.at().family("file").qualifier(cq).put(new Value(val));
-    m.at().family("file").qualifier(delete).delete();
-    return m;
-  }
-
-  /**
-   * Verify that the result mutations match the expected mutations, in the correct order
-   */
-  private void verifyPathsReplaced(List<Mutation> expected, List<Mutation> results) {
-    Iterator<Mutation> expectIter = expected.iterator();
-    int deleteCount = 0;
-    int updateCount = 0;
-    for (Mutation mut : results) {
-      Mutation next = expectIter.next();
-      Iterator<ColumnUpdate> nextUpdates = next.getUpdates().iterator();
-      assertEquals(next.getUpdates().size(), mut.getUpdates().size());
-      assertEquals(new Text(next.getRow()), new Text(mut.getRow()));
-
-      // check updates are all the same
-      for (ColumnUpdate update : mut.getUpdates()) {
-        ColumnUpdate nextUpdate = nextUpdates.next();
-        Text cq = new Text(nextUpdate.getColumnQualifier());
-        log.debug("Checking for expected columnUpdate: " + cq + " deleted? " + update.isDeleted());
-        assertEquals(cq, new Text(update.getColumnQualifier()));
-        if (update.isDeleted()) {
-          deleteCount++;
-        } else {
-          updateCount++;
-          assertEquals(new Text(nextUpdate.getValue()), new Text(update.getValue()));
-        }
-      }
-    }
-
-    assertEquals(deleteCount, updateCount, "Replacements should have update for every delete");
-  }
-
-  @Test
-  public void testDropSortedMapWALs() throws IOException {
-    Configuration hadoopConf = new Configuration();
-    ConfigurationCopy conf = new ConfigurationCopy();
-    FileSystem fs = new Path("file:///").getFileSystem(hadoopConf);
-
-    List<String> volumes = Arrays.asList("/vol1/", "/vol2/");
-    Collection<Volume> vols =
-        volumes.stream().map(s -> new VolumeImpl(fs, s)).collect(Collectors.toList());
-    Set<String> fullyQualifiedVols = Set.of("file://vol1/", "file://vol2/");
-    Set<String> recoveryDirs =
-        Set.of("file://vol1/accumulo/recovery", "file://vol2/accumulo/recovery");
-    conf.set(Property.INSTANCE_VOLUMES, String.join(",", fullyQualifiedVols));
-
-    ServerContext context = createMock(ServerContext.class);
-    Path recoveryDir1 = new Path("file://vol1/accumulo/recovery");
-    Path recoveryDir2 = new Path("file://vol2/accumulo/recovery");
-    VolumeManager volumeManager = createMock(VolumeManager.class);
-
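-    // dir0 (A123456789) contains only a plain file and is expected to be retained;
-    // dir1 (B123456789) contains a part-r-0000 directory left by the old sorted-map
-    // log recovery and is expected to be deleted recursively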
-    FileStatus[] dirs = new FileStatus[2];
-    dirs[0] = createMock(FileStatus.class);
-    Path dir0 = new Path("file://vol1/accumulo/recovery/A123456789");
-    FileStatus[] dir0Files = new FileStatus[1];
-    dir0Files[0] = createMock(FileStatus.class);
-    dirs[1] = createMock(FileStatus.class);
-    Path dir1 = new Path("file://vol1/accumulo/recovery/B123456789");
-    FileStatus[] dir1Files = new FileStatus[1];
-    dir1Files[0] = createMock(FileStatus.class);
-    Path part1Dir = new Path("file://vol1/accumulo/recovery/B123456789/part-r-0000");
-
-    expect(context.getVolumeManager()).andReturn(volumeManager).once();
-    expect(context.getConfiguration()).andReturn(conf).once();
-    expect(context.getHadoopConf()).andReturn(hadoopConf).once();
-    expect(context.getRecoveryDirs()).andReturn(recoveryDirs).once();
-    expect(volumeManager.getVolumes()).andReturn(vols).once();
-
-    expect(volumeManager.exists(recoveryDir1)).andReturn(true).once();
-    expect(volumeManager.exists(recoveryDir2)).andReturn(false).once();
-    expect(volumeManager.listStatus(recoveryDir1)).andReturn(dirs).once();
-    expect(dirs[0].getPath()).andReturn(dir0).once();
-    expect(volumeManager.listStatus(dir0)).andReturn(dir0Files).once();
-    expect(dir0Files[0].isDirectory()).andReturn(false).once();
-
-    expect(dirs[1].getPath()).andReturn(dir1).once();
-    expect(volumeManager.listStatus(dir1)).andReturn(dir1Files).once();
-    expect(dir1Files[0].isDirectory()).andReturn(true).once();
-    expect(dir1Files[0].getPath()).andReturn(part1Dir).once();
-    expect(volumeManager.deleteRecursively(dir1)).andReturn(true).once();
-
-    replay(context, volumeManager, dirs[0], dirs[1], dir0Files[0], dir1Files[0]);
-    Upgrader9to10.dropSortedMapWALFiles(context);
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/conf/util/ConfigTransformerIT.java b/test/src/main/java/org/apache/accumulo/test/conf/util/ConfigTransformerIT.java
deleted file mode 100644
index a3a59ca992..0000000000
--- a/test/src/main/java/org/apache/accumulo/test/conf/util/ConfigTransformerIT.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.test.conf.util;
-
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.MINUTES;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.accumulo.harness.AccumuloITBase.ZOOKEEPER_TESTING_SERVER;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import java.io.File;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.core.util.Retry;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.codec.VersionedPropCodec;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.impl.PropStoreWatcher;
-import org.apache.accumulo.server.conf.store.impl.ZooPropStore;
-import org.apache.accumulo.server.conf.util.ConfigTransformer;
-import org.apache.accumulo.server.conf.util.TransformToken;
-import org.apache.accumulo.test.zookeeper.ZooKeeperTestingServer;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Tag(ZOOKEEPER_TESTING_SERVER)
-public class ConfigTransformerIT {
-
-  private static final Logger log = LoggerFactory.getLogger(ConfigTransformerIT.class);
-  private final static VersionedPropCodec codec = VersionedPropCodec.getDefault();
-  @TempDir
-  private static File tempDir;
-  private static ZooKeeperTestingServer testZk = null;
-  private static ZooKeeper zooKeeper;
-  private static ZooReaderWriter zrw;
-
-  private InstanceId instanceId = null;
-  private ZooPropStore propStore = null;
-  private ServerContext context = null;
-  private PropStoreWatcher watcher = null;
-
-  @BeforeAll
-  public static void setupZk() {
-
-    // using default zookeeper port - we don't have a full configuration
-    testZk = new ZooKeeperTestingServer(tempDir);
-    zooKeeper = testZk.getZooKeeper();
-    ZooUtil.digestAuth(zooKeeper, ZooKeeperTestingServer.SECRET);
-    zrw = testZk.getZooReaderWriter();
-  }
-
-  @AfterAll
-  public static void shutdownZK() throws Exception {
-    testZk.close();
-  }
-
-  @BeforeEach
-  public void testSetup() throws Exception {
-    instanceId = InstanceId.of(UUID.randomUUID());
-
-    List<LegacyPropData.PropNode> nodes = LegacyPropData.getData(instanceId);
-    for (LegacyPropData.PropNode node : nodes) {
-      zrw.putPersistentData(node.getPath(), node.getData(), ZooUtil.NodeExistsPolicy.SKIP);
-    }
-    propStore = ZooPropStore.initialize(instanceId, zrw);
-
-    context = createMock(ServerContext.class);
-    expect(context.getInstanceID()).andReturn(instanceId).anyTimes();
-    expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes();
-    expect(context.getPropStore()).andReturn(propStore).anyTimes();
-
-    watcher = createMock(PropStoreWatcher.class);
-    watcher.process(anyObject());
-    expectLastCall().anyTimes();
-
-    replay(context, watcher);
-
-  }
-
-  @AfterEach
-  public void cleanupZnodes() {
-    try {
-      ZKUtil.deleteRecursive(zooKeeper, Constants.ZROOT);
-    } catch (KeeperException | InterruptedException ex) {
-      throw new IllegalStateException("Failed to clean-up test zooKeeper nodes.", ex);
-    }
-    verify(context, watcher);
-  }
-
-  @Test
-  public void propStoreConversionTest() throws Exception {
-
-    var sysPropKey = SystemPropKey.of(instanceId);
-
-    List<String> sysLegacy = zrw.getChildren(sysPropKey.getPath());
-    log.info("Before: {}", sysLegacy);
-
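-    // reading through the prop store should trigger the on-demand conversion of the
-    // legacy per-property child nodes into a single versioned property node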
-    var vProps = propStore.get(sysPropKey);
-    assertNotNull(vProps);
-    log.info("Converted: {}", vProps);
-
-    sysLegacy = zrw.getChildren(sysPropKey.getPath());
-    log.info("After: {}", sysLegacy);
-
-  }
-
-  @Test
-  public void transformTest() throws Exception {
-
-    var sysPropKey = SystemPropKey.of(instanceId);
-
-    ConfigTransformer transformer = new ConfigTransformer(zrw, codec, watcher);
-    List<String> sysLegacy = zrw.getChildren(sysPropKey.getPath());
-    log.info("Before: {}", sysLegacy);
-
-    var converted = transformer.transform(sysPropKey, sysPropKey.getPath(), false);
-
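-    // each legacy child node should map to exactly one converted property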
-    assertEquals(sysLegacy.size(), converted.asMap().size());
-  }
-
-  @Test
-  public void failToGetLock() throws Exception {
-    var sysPropKey = SystemPropKey.of(instanceId);
-
-    Retry retry =
-        Retry.builder().maxRetries(3).retryAfter(250, MILLISECONDS).incrementBy(500, MILLISECONDS)
-            .maxWait(5, SECONDS).backOffFactor(1.75).logInterval(3, MINUTES).createRetry();
-
-    ConfigTransformer transformer = new ConfigTransformer(zrw, codec, watcher, retry);
-    // manually create a lock so transformer fails
-    zrw.putEphemeralData(sysPropKey.getPath() + TransformToken.TRANSFORM_TOKEN, new byte[0]);
-
-    assertThrows(IllegalStateException.class,
-        () -> transformer.transform(sysPropKey, sysPropKey.getPath(), false));
-
-  }
-
-  @Test
-  public void continueOnLockRelease() {
-
-  }
-
-  @Test
-  public void createdByAnother() {
-
-  }
-
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/conf/util/LegacyPropData.java b/test/src/main/java/org/apache/accumulo/test/conf/util/LegacyPropData.java
deleted file mode 100644
index 59be787cd0..0000000000
--- a/test/src/main/java/org/apache/accumulo/test/conf/util/LegacyPropData.java
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.test.conf.util;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-
-/**
- * Provides sampled ZooKeeper properties paths / data from a 1.10 instance for testing.
- */
-public class LegacyPropData {
-
-  /**
-   * Generates a list of ZooKeeper nodes captured from a 1.10 instance. The list is ordered so that
-   * parent nodes are created before their children. The data is used for property conversion
-   * testing; values that are irrelevant to the conversion are left as empty ZooKeeper nodes.
-   */
-  public static List<PropNode> getData(final InstanceId instanceId) {
-    String zkRoot = ZooUtil.getRoot(instanceId);
-    List<PropNode> names = new ArrayList<>(250);
-
-    names.add(new PropNode(Constants.ZROOT, null));
-    names.add(new PropNode(ZooUtil.getRoot(instanceId), null));
-
-    names.add(new PropNode(zkRoot + "/bulk_failed_copyq", null));
-    names.add(new PropNode(zkRoot + "/config", null));
-    names.add(new PropNode(zkRoot + "/dead", null));
-    names.add(new PropNode(zkRoot + "/fate", null));
-    names.add(new PropNode(zkRoot + "/gc", null));
-    names.add(new PropNode(zkRoot + "/hdfs_reservations", null));
-    names.add(new PropNode(zkRoot + "/masters", null));
-    names.add(new PropNode(zkRoot + "/monitor", null));
-    names.add(new PropNode(zkRoot + "/namespaces", null));
-    names.add(new PropNode(zkRoot + "/next_file", null));
-    names.add(new PropNode(zkRoot + "/problems", null));
-    names.add(new PropNode(zkRoot + "/recovery", null));
-    names.add(new PropNode(zkRoot + "/replication", null));
-    names.add(new PropNode(zkRoot + "/root_tablet", null));
-    names.add(new PropNode(zkRoot + "/table_locks", null));
-    names.add(new PropNode(zkRoot + "/tables", null));
-    names.add(new PropNode(zkRoot + "/tservers", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS, null));
-    names.add(new PropNode(zkRoot + "/wals", null));
-    names.add(new PropNode(zkRoot + "/bulk_failed_copyq/locks", null));
-    names.add(new PropNode(zkRoot + "/config/table.bloom.enabled", "true"));
-    names.add(new PropNode(zkRoot + "/dead/tservers", null));
-    names.add(new PropNode(zkRoot + "/gc/lock", null));
-    names.add(new PropNode(zkRoot + "/gc/lock/zlock-0000000000", null));
-    names.add(new PropNode(zkRoot + "/masters/goal_state", null));
-    names.add(new PropNode(zkRoot + "/masters/lock", null));
-    names.add(new PropNode(zkRoot + "/masters/repl_coord_addr", null));
-    names.add(new PropNode(zkRoot + "/masters/tick", null));
-    names.add(new PropNode(zkRoot + "/masters/lock/zlock-0000000000", null));
-    names.add(new PropNode(zkRoot + "/monitor/http_addr", null));
-    names.add(new PropNode(zkRoot + "/monitor/lock", null));
-    names.add(new PropNode(zkRoot + "/monitor/log4j_addr", null));
-    names.add(new PropNode(zkRoot + "/monitor/lock/zlock-0000000000", null));
-    names.add(new PropNode(zkRoot + "/namespaces/+accumulo", null));
-    names.add(new PropNode(zkRoot + "/namespaces/+default", null));
-    names.add(new PropNode(zkRoot + "/namespaces/2", null));
-    names.add(new PropNode(zkRoot + "/namespaces/3", null));
-    names.add(new PropNode(zkRoot + "/namespaces/+accumulo/conf", null));
-    names.add(new PropNode(zkRoot + "/namespaces/+accumulo/name", "accumulo"));
-    names.add(new PropNode(zkRoot + "/namespaces/+default/conf", null));
-    names.add(new PropNode(zkRoot + "/namespaces/+default/name", null));
-    names.add(new PropNode(zkRoot + "/namespaces/2/conf", null));
-    names.add(new PropNode(zkRoot + "/namespaces/2/name", "ns1"));
-    names.add(new PropNode(zkRoot + "/namespaces/2/conf/table.bloom.enabled", "false"));
-    names.add(new PropNode(zkRoot + "/namespaces/3/conf", null));
-    names.add(new PropNode(zkRoot + "/namespaces/3/name", "ns2"));
-    names.add(new PropNode(zkRoot + "/recovery/locks", null));
-    names.add(new PropNode(zkRoot + "/replication/tservers", null));
-    names.add(new PropNode(zkRoot + "/replication/workqueue", null));
-    names.add(new PropNode(zkRoot + "/replication/tservers/localhost:11000", null));
-    names.add(new PropNode(zkRoot + "/replication/workqueue/locks", null));
-    names.add(new PropNode(zkRoot + "/root_tablet/current_logs", null));
-    names.add(new PropNode(zkRoot + "/root_tablet/dir", null));
-    names.add(new PropNode(zkRoot + "/root_tablet/lastlocation", null));
-    names.add(new PropNode(zkRoot + "/root_tablet/location", null));
-    names.add(new PropNode(zkRoot + "/root_tablet/walogs", null));
-    names.add(new PropNode(zkRoot + "/tables/!0", null));
-    names.add(new PropNode(zkRoot + "/tables/+r", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep", null));
-    names.add(new PropNode(zkRoot + "/tables/1", null));
-    names.add(new PropNode(zkRoot + "/tables/4", null));
-    names.add(new PropNode(zkRoot + "/tables/5", null));
-    names.add(new PropNode(zkRoot + "/tables/6", null));
-    names.add(new PropNode(zkRoot + "/tables/7", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/name", "metadata"));
-    names.add(new PropNode(zkRoot + "/tables/!0/namespace", "+accumulo"));
-    names.add(new PropNode(zkRoot + "/tables/!0/state", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.failures.ignore", "false"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.cache.index.enable", "true"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.group.server", "file,log,srv,future"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.scan.replcombiner.opt.columns",
-        "stat"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.compaction.major.ratio", "1"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.file.replication", "5"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.majc.replcombiner",
-        "9,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.scan.replcombiner",
-        "9,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(
-        new PropNode(zkRoot + "/tables/!0/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.minc.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(
-        new PropNode(zkRoot + "/tables/!0/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.scan.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.group.tablet", "~tab,loc"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.groups.enabled", "tablet,server"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.majc.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.constraint.1",
-        "org.apache.accumulo.server.constraints.MetadataConstraints"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.minc.replcombiner.opt.columns",
-        "stat"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.split.threshold", "64M"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.majc.bulkLoadFilter",
-        "20,org.apache.accumulo.server.iterators.MetadataBulkLoadFilter"));
-    names.add(
-        new PropNode(zkRoot + "/tables/!0/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.minc.replcombiner",
-        "9,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.file.compress.blocksize", "32K"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.durability", "sync"));
-    names
-        .add(new PropNode(zkRoot + "/tables/!0/conf/table.security.scan.visibility.default", null));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.cache.block.enable", "true"));
-    names.add(new PropNode(zkRoot + "/tables/!0/conf/table.iterator.majc.replcombiner.opt.columns",
-        "stat"));
-    names.add(new PropNode(zkRoot + "/tables/+r/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/name", "root"));
-    names.add(new PropNode(zkRoot + "/tables/+r/namespace", "+accumulo"));
-    names.add(new PropNode(zkRoot + "/tables/+r/state", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.group.tablet", "~tab,loc"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.groups.enabled", "tablet,server"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.iterator.majc.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.constraint.1",
-        "org.apache.accumulo.server.constraints.MetadataConstraints"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.split.threshold", "64M"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.failures.ignore", "false"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.iterator.majc.bulkLoadFilter",
-        "20,org.apache.accumulo.server.iterators.MetadataBulkLoadFilter"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.cache.index.enable", "true"));
-    names.add(
-        new PropNode(zkRoot + "/tables/+r/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.group.server", "file,log,srv,future"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.file.compress.blocksize", "32K"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.durability", "sync"));
-    names
-        .add(new PropNode(zkRoot + "/tables/+r/conf/table.security.scan.visibility.default", null));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.compaction.major.ratio", "1"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.file.replication", "5"));
-    names.add(
-        new PropNode(zkRoot + "/tables/+r/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.iterator.minc.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(
-        new PropNode(zkRoot + "/tables/+r/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.cache.block.enable", "true"));
-    names.add(new PropNode(zkRoot + "/tables/+r/conf/table.iterator.scan.vers",
-        "10,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/name", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/namespace", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/state", null));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.iterator.minc.statuscombiner",
-        "30,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.groups.enabled", "repl,work"));
-    names.add(new PropNode(
-        zkRoot + "/tables/+rep/conf/table.iterator.majc.statuscombiner.opt.columns", "repl,work"));
-    names.add(new PropNode(
-        zkRoot + "/tables/+rep/conf/table.iterator.minc.statuscombiner.opt.columns", "repl,work"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.group.work", "work"));
-    names.add(new PropNode(
-        zkRoot + "/tables/+rep/conf/table.iterator.scan.statuscombiner.opt.columns", "repl,work"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.iterator.majc.statuscombiner",
-        "30,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.group.repl", "repl"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.iterator.scan.statuscombiner",
-        "30,org.apache.accumulo.server.replication.StatusCombiner"));
-    names.add(new PropNode(zkRoot + "/tables/+rep/conf/table.formatter",
-        "org.apache.accumulo.server.replication.StatusFormatter"));
-    names.add(new PropNode(zkRoot + "/tables/1/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/1/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/1/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/1/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/1/name", "trace"));
-    names.add(new PropNode(zkRoot + "/tables/1/namespace", "+default"));
-    names.add(new PropNode(zkRoot + "/tables/1/state", null));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.constraint.1",
-        "org.apache.accumulo.core.constraints.DefaultKeySizeConstraint"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.formatter",
-        "org.apache.accumulo.tracer.TraceFormatter"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.majc.ageoff",
-        "10,org.apache.accumulo.core.iterators.user.AgeOffFilter"));
-    names.add(
-        new PropNode(zkRoot + "/tables/1/conf/table.iterator.majc.ageoff.opt.ttl", "604800000"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.majc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.minc.ageoff",
-        "10,org.apache.accumulo.core.iterators.user.AgeOffFilter"));
-    names.add(
-        new PropNode(zkRoot + "/tables/1/conf/table.iterator.minc.ageoff.opt.ttl", "604800000"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.minc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.scan.ageoff",
-        "10,org.apache.accumulo.core.iterators.user.AgeOffFilter"));
-    names.add(
-        new PropNode(zkRoot + "/tables/1/conf/table.iterator.scan.ageoff.opt.ttl", "604800000"));
-    names.add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.scan.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/1/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/4/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/4/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/4/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/4/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/4/name", "tbl1"));
-    names.add(new PropNode(zkRoot + "/tables/4/namespace", "2"));
-    names.add(new PropNode(zkRoot + "/tables/4/state", null));
-    names.add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.majc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/4/conf/table.constraint.1",
-        "org.apache.accumulo.core.constraints.DefaultKeySizeConstraint"));
-    names
-        .add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.minc.vers",
-        "/20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names
-        .add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/4/conf/table.iterator.scan.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names.add(new PropNode(zkRoot + "/tables/4/conf/table.bloom.enabled", "true"));
-    names.add(new PropNode(zkRoot + "/tables/5/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/5/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/5/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/5/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/5/name", "tbl2"));
-    names.add(new PropNode(zkRoot + "/tables/5/namespace", "2"));
-    names.add(new PropNode(zkRoot + "/tables/5/state", null));
-    names.add(new PropNode(zkRoot + "/tables/5/conf/table.constraint.1",
-        "org.apache.accumulo.core.constraints.DefaultKeySizeConstraint"));
-    names.add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.majc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.minc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.scan.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/5/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/6/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/6/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/6/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/6/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/6/name", "tbl3"));
-    names.add(new PropNode(zkRoot + "/tables/6/namespace", "+default"));
-    names.add(new PropNode(zkRoot + "/tables/6/state", null));
-    names.add(new PropNode(zkRoot + "/tables/6/conf/table.bloom.enabled", "true"));
-    names.add(new PropNode(zkRoot + "/tables/6/conf/table.constraint.1",
-        "org.apache.accumulo.core.constraints.DefaultKeySizeConstraint"));
-    names.add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.majc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.majc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.minc.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.minc.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.scan.vers",
-        "20,org.apache.accumulo.core.iterators.user.VersioningIterator"));
-    names
-        .add(new PropNode(zkRoot + "/tables/6/conf/table.iterator.scan.vers.opt.maxVersions", "1"));
-    names.add(new PropNode(zkRoot + "/tables/7/compact-cancel-id", null));
-    names.add(new PropNode(zkRoot + "/tables/7/compact-id", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf", null));
-    names.add(new PropNode(zkRoot + "/tables/7/flush-id", null));
-    names.add(new PropNode(zkRoot + "/tables/7/name", "tbl4"));
-    names.add(new PropNode(zkRoot + "/tables/7/namespace", "2"));
-    names.add(new PropNode(zkRoot + "/tables/7/state", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf/table.bloom.enabled", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf/table.constraint.1", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf/table.iterator.majc.vers", null));
-    names.add(
-        new PropNode(zkRoot + "/tables/7/conf/table.iterator.majc.vers.opt.maxVersions", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf/table.iterator.minc.vers", null));
-    names.add(
-        new PropNode(zkRoot + "/tables/7/conf/table.iterator.minc.vers.opt.maxVersions", null));
-    names.add(new PropNode(zkRoot + "/tables/7/conf/table.iterator.scan.vers", null));
-    names.add(
-        new PropNode(zkRoot + "/tables/7/conf/table.iterator.scan.vers.opt.maxVersions", null));
-    names.add(new PropNode(zkRoot + "/tservers/localhost:11000", null));
-    names.add(new PropNode(zkRoot + "/tservers/localhost:11000/zlock-0000000000", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Authorizations", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Namespaces", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/System", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Namespaces/+accumulo", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Namespaces/2", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Namespaces/3", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/!0", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/+r", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/1", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/4", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/5", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/6", null));
-    names.add(new PropNode(zkRoot + Constants.ZUSERS + "/root/Tables/7", null));
-    names.add(new PropNode(zkRoot + "/wals/localhost:11000[10000c3202e0003]", null));
-    names.add(new PropNode(
-        zkRoot + "/wals/localhost:11000[10000c3202e0003]/0fef8f3b-d02d-413b-9a27-f1710812b216",
-        null));
-    names.add(new PropNode(
-        zkRoot + "/wals/localhost:11000[10000c3202e0003]/9e410484-e61b-4707-847a-67f96715aa04",
-        null));
-
-    return names;
-  }
-
-  public static class PropNode {
-    private static final byte[] empty = new byte[0];
-    private final String path;
-    private final String value;
-
-    public PropNode(final String path, final String value) {
-      this.path = path;
-      this.value = value;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public byte[] getData() {
-      if (value == null) {
-        return empty;
-      }
-      return value.getBytes(UTF_8);
-    }
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/conf/util/TransformTokenIT.java b/test/src/main/java/org/apache/accumulo/test/conf/util/TransformTokenIT.java
deleted file mode 100644
index 647a1e45b9..0000000000
--- a/test/src/main/java/org/apache/accumulo/test/conf/util/TransformTokenIT.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.test.conf.util;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.accumulo.harness.AccumuloITBase.ZOOKEEPER_TESTING_SERVER;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.io.File;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.impl.PropStoreWatcher;
-import org.apache.accumulo.server.conf.store.impl.ZooPropStore;
-import org.apache.accumulo.server.conf.util.TransformToken;
-import org.apache.accumulo.test.zookeeper.ZooKeeperTestingServer;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-
-@Tag(ZOOKEEPER_TESTING_SERVER)
-public class TransformTokenIT {
-
-  @TempDir
-  private static File tempDir;
-
-  private static ZooKeeperTestingServer testZk = null;
-  private static ZooKeeper zooKeeper;
-  private static ZooReaderWriter zrw;
-  private InstanceId instanceId = null;
-
-  private ServerContext context = null;
-  private PropStoreWatcher watcher = null;
-
-  @BeforeAll
-  public static void setupZk() {
-
-    // using default zookeeper port - we don't have a full configuration
-    testZk = new ZooKeeperTestingServer(tempDir);
-    zooKeeper = testZk.getZooKeeper();
-    zrw = testZk.getZooReaderWriter();
-  }
-
-  @AfterAll
-  public static void shutdownZK() throws Exception {
-    testZk.close();
-  }
-
-  @BeforeEach
-  public void testSetup() throws Exception {
-    instanceId = InstanceId.of(UUID.randomUUID());
-
-    List<LegacyPropData.PropNode> nodes = LegacyPropData.getData(instanceId);
-    for (LegacyPropData.PropNode node : nodes) {
-      zrw.putPersistentData(node.getPath(), node.getData(), ZooUtil.NodeExistsPolicy.SKIP);
-    }
-
-    ZooPropStore propStore = ZooPropStore.initialize(instanceId, zrw);
-
-    context = createMock(ServerContext.class);
-    expect(context.getInstanceID()).andReturn(instanceId).anyTimes();
-    expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes();
-    expect(context.getPropStore()).andReturn(propStore).anyTimes();
-
-    watcher = createMock(PropStoreWatcher.class);
-  }
-
-  @AfterEach
-  public void cleanupZnodes() throws Exception {
-    ZooUtil.digestAuth(zooKeeper, ZooKeeperTestingServer.SECRET);
-    ZKUtil.deleteRecursive(zooKeeper, Constants.ZROOT);
-    verify(context, watcher);
-  }
-
-  @Test
-  public void tokenGoPathTest() {
-    replay(context, watcher);
-
-    var sysPropKey = SystemPropKey.of(instanceId);
-
-    TransformToken token = TransformToken.createToken(sysPropKey.getPath(), zrw);
-
-    assertTrue(token.haveTokenOwnership());
-    token.releaseToken();
-    assertFalse(token.haveTokenOwnership());
-
-    // relock by getting a new lock
-    TransformToken lock2 = TransformToken.createToken(sysPropKey.getPath(), zrw);
-    assertTrue(lock2.haveTokenOwnership());
-
-    // fail with a current lock node present
-    TransformToken lock3 = TransformToken.createToken(sysPropKey.getPath(), zrw);
-    assertFalse(lock3.haveTokenOwnership());
-    // and confirm lock still present
-    assertTrue(lock2.haveTokenOwnership());
-  }
-
-  @Test
-  public void failOnInvalidLockTest() throws Exception {
-
-    replay(context, watcher);
-
-    var sysPropKey = SystemPropKey.of(instanceId);
-    var tokenPath = sysPropKey.getPath() + TransformToken.TRANSFORM_TOKEN;
-
-    TransformToken lock = TransformToken.createToken(sysPropKey.getPath(), zrw);
-
-    // force change in lock
-    assertTrue(lock.haveTokenOwnership());
-    zrw.mutateExisting(tokenPath, v -> UUID.randomUUID().toString().getBytes(UTF_8));
-    assertThrows(IllegalStateException.class, lock::releaseToken,
-        "Expected unlock to fail on different UUID");
-
-    // clean-up and get new lock
-    zrw.delete(tokenPath);
-    TransformToken lock3 = TransformToken.createToken(sysPropKey.getPath(), zrw);
-    assertTrue(lock3.haveTokenOwnership());
-    zrw.delete(tokenPath);
-    assertThrows(IllegalStateException.class, lock::releaseToken,
-        "Expected unlock to fail when no lock present");
-
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
index c17bbd386f..95713d8c3a 100644
--- a/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
@@ -53,7 +53,6 @@ import org.apache.accumulo.monitor.Monitor;
 import org.apache.accumulo.monitor.MonitorExecutable;
 import org.apache.accumulo.server.conf.CheckCompactionConfig;
 import org.apache.accumulo.server.conf.CheckServerConfig;
-import org.apache.accumulo.server.conf.util.ConfigPropertyUpgrader;
 import org.apache.accumulo.server.conf.util.ZooInfoViewer;
 import org.apache.accumulo.server.init.Initialize;
 import org.apache.accumulo.server.util.Admin;
@@ -123,7 +122,6 @@ public class KeywordStartIT {
     expectSet.put("check-server-config", CheckServerConfig.class);
     expectSet.put("compaction-coordinator", CoordinatorExecutable.class);
     expectSet.put("compactor", CompactorExecutable.class);
-    expectSet.put("config-upgrade", ConfigPropertyUpgrader.class);
     expectSet.put("convert-config", ConvertConfig.class);
     expectSet.put("create-token", CreateToken.class);
     expectSet.put("dump-zoo", DumpZookeeper.class);
diff --git a/test/src/main/java/org/apache/accumulo/test/upgrade/ConfigPropertyUpgraderIT.java b/test/src/main/java/org/apache/accumulo/test/upgrade/ConfigPropertyUpgraderIT.java
deleted file mode 100644
index fd14764cd6..0000000000
--- a/test/src/main/java/org/apache/accumulo/test/upgrade/ConfigPropertyUpgraderIT.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.test.upgrade;
-
-import static org.apache.accumulo.harness.AccumuloITBase.ZOOKEEPER_TESTING_SERVER;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.expect;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import java.io.File;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.conf.DeprecatedPropertyUtil;
-import org.apache.accumulo.core.data.InstanceId;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
-import org.apache.accumulo.server.ServerContext;
-import org.apache.accumulo.server.conf.store.PropStore;
-import org.apache.accumulo.server.conf.store.SystemPropKey;
-import org.apache.accumulo.server.conf.store.impl.ZooPropStore;
-import org.apache.accumulo.server.conf.util.ConfigPropertyUpgrader;
-import org.apache.accumulo.test.conf.util.LegacyPropData;
-import org.apache.accumulo.test.zookeeper.ZooKeeperTestingServer;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooKeeper;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Tag(ZOOKEEPER_TESTING_SERVER)
-public class ConfigPropertyUpgraderIT {
-
-  private static final Logger log = LoggerFactory.getLogger(ConfigPropertyUpgraderIT.class);
-  private static ZooKeeperTestingServer testZk = null;
-  private static ZooKeeper zooKeeper;
-  private static ZooReaderWriter zrw;
-
-  private static final String TEST_DEPRECATED_PREFIX = "upgrader.test.deprecated.";
-  private static final String TEST_UPGRADED_PREFIX = "upgrader.test.upgraded.";
-
-  // Create legacy renamer for this test
-  private static final DeprecatedPropertyUtil.PropertyRenamer TEST_PROP_RENAMER =
-      DeprecatedPropertyUtil.PropertyRenamer.renamePrefix(TEST_DEPRECATED_PREFIX,
-          TEST_UPGRADED_PREFIX);
-
-  private InstanceId instanceId = null;
-
-  @TempDir
-  private static File tempDir;
-
-  @BeforeAll
-  public static void setupZk() {
-    DeprecatedPropertyUtil.getPropertyRenamers().add(TEST_PROP_RENAMER);
-
-    // using default zookeeper port - we don't have a full configuration
-    testZk = new ZooKeeperTestingServer(tempDir);
-    zooKeeper = testZk.getZooKeeper();
-    ZooUtil.digestAuth(zooKeeper, ZooKeeperTestingServer.SECRET);
-
-    zrw = testZk.getZooReaderWriter();
-
-  }
-
-  @AfterAll
-  public static void shutdownZK() throws Exception {
-    DeprecatedPropertyUtil.getPropertyRenamers().remove(TEST_PROP_RENAMER);
-
-    testZk.close();
-  }
-
-  @BeforeEach
-  public void setupZnodes() throws Exception {
-
-    instanceId = InstanceId.of(UUID.randomUUID());
-
-    testZk.initPaths(ZooUtil.getRoot(instanceId));
-
-    ServerContext context = createNiceMock(ServerContext.class);
-    expect(context.getZooReaderWriter()).andReturn(zrw).anyTimes();
-    expect(context.getZooKeepersSessionTimeOut()).andReturn(zooKeeper.getSessionTimeout())
-        .anyTimes();
-    expect(context.getInstanceID()).andReturn(instanceId).anyTimes();
-
-    // Add dummy legacy properties for testing
-    String zkRoot = ZooUtil.getRoot(instanceId);
-    List<LegacyPropData.PropNode> nodes = LegacyPropData.getData(instanceId);
-    nodes.add(new LegacyPropData.PropNode(
-        zkRoot + Constants.ZCONFIG + "/" + TEST_DEPRECATED_PREFIX + "prop1", "4"));
-    nodes.add(new LegacyPropData.PropNode(
-        zkRoot + Constants.ZCONFIG + "/" + TEST_DEPRECATED_PREFIX + "prop2", "10m"));
-    nodes.add(new LegacyPropData.PropNode(
-        zkRoot + Constants.ZCONFIG + "/" + TEST_DEPRECATED_PREFIX + "prop3", "10"));
-    nodes.add(new LegacyPropData.PropNode(
-        zkRoot + Constants.ZCONFIG + "/" + TEST_DEPRECATED_PREFIX + "prop4", "4"));
-
-    for (LegacyPropData.PropNode node : nodes) {
-      zrw.putPersistentData(node.getPath(), node.getData(), ZooUtil.NodeExistsPolicy.SKIP);
-    }
-
-    try {
-      zrw.putPersistentData(ZooUtil.getRoot(instanceId) + Constants.ZCONFIG, new byte[0],
-          ZooUtil.NodeExistsPolicy.SKIP);
-    } catch (KeeperException ex) {
-      log.trace("Issue during zk initialization, skipping", ex);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      throw new IllegalStateException("Interrupted during zookeeper path initialization", ex);
-    }
-  }
-
-  @AfterEach
-  public void cleanupZnodes() {
-    try {
-      ZKUtil.deleteRecursive(zooKeeper, Constants.ZROOT);
-    } catch (KeeperException | InterruptedException ex) {
-      throw new IllegalStateException("Failed to clean-up test zooKeeper nodes.", ex);
-    }
-  }
-
-  @Test
-  void doUpgrade() {
-    ConfigPropertyUpgrader upgrader = new ConfigPropertyUpgrader();
-    upgrader.doUpgrade(instanceId, zrw);
-
-    PropStore propStore = ZooPropStore.initialize(instanceId, zrw);
-
-    var sysKey = SystemPropKey.of(instanceId);
-    log.info("PropStore: {}", propStore.get(sysKey));
-
-    var vProps = propStore.get(sysKey);
-    if (vProps == null) {
-      fail("unexpected null returned from prop store get for " + sysKey);
-      return; // keep spotbugs happy
-    }
-
-    Map<String,String> props = vProps.asMap();
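-    // 4 renamed upgrader.test.* properties plus table.bloom.enabled from the legacy data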
-    assertEquals(5, props.size());
-    // also validates that rename occurred from deprecated to upgraded names
-    assertEquals("4", props.get(TEST_UPGRADED_PREFIX + "prop1"));
-    assertEquals("10m", props.get(TEST_UPGRADED_PREFIX + "prop2"));
-    assertEquals("10", props.get(TEST_UPGRADED_PREFIX + "prop3"));
-    assertEquals("4", props.get(TEST_UPGRADED_PREFIX + "prop4"));
-
-    assertEquals("true", props.get("table.bloom.enabled"));
-
-  }
-}
diff --git a/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java b/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java
deleted file mode 100644
index 6944204d1b..0000000000
--- a/test/src/main/java/org/apache/accumulo/test/upgrade/GCUpgrade9to10TestIT.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.accumulo.test.upgrade;
-
-import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.time.Duration;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.fate.zookeeper.ServiceLock;
-import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.core.metadata.schema.Ample;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.DeletesSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.manager.upgrade.Upgrader9to10;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.miniclusterImpl.ProcessNotFoundException;
-import org.apache.accumulo.server.gc.AllVolumesDirectory;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.zookeeper.KeeperException;
-import org.junit.jupiter.api.Test;
-
-public class GCUpgrade9to10TestIT extends ConfigurableMacBase {
-  private static final String OUR_SECRET = "itsreallysecret";
-  private static final String OLDDELPREFIX = "~del";
-  private static final Upgrader9to10 upgrader = new Upgrader9to10();
-
-  @Override
-  protected Duration defaultTimeout() {
-    return Duration.ofMinutes(5);
-  }
-
-  @Override
-  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
-    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
-    cfg.setProperty(Property.INSTANCE_SECRET, OUR_SECRET);
-    cfg.setProperty(Property.GC_CYCLE_START, "1000"); // gc will be killed before it is run
-
-    // use raw local file system so walogs sync and flush will work
-    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-  }
-
-  private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
-    // kill gc started by MAC
-    getCluster().killProcess(ServerType.GARBAGE_COLLECTOR,
-        getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
-    // delete lock in zookeeper if there, this will allow next GC to start quickly
-    var path = ServiceLock.path(getServerContext().getZooKeeperRoot() + Constants.ZGC_LOCK);
-    ZooReaderWriter zk = getServerContext().getZooReaderWriter();
-    try {
-      ServiceLock.deleteLock(zk, path);
-    } catch (IllegalStateException e) {
-      log.error("Unable to delete ZooLock for mini accumulo-gc", e);
-    }
-
-    assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
-  }
-
-  @Test
-  public void gcUpgradeRootTableDeletesIT() throws Exception {
-    gcUpgradeDeletesTest(Ample.DataLevel.METADATA, 3);
-  }
-
-  @Test
-  public void gcUpgradeMetadataTableDeletesIT() throws Exception {
-    gcUpgradeDeletesTest(Ample.DataLevel.USER, 3);
-  }
-
-  @Test
-  public void gcUpgradeNoDeletesIT() throws Exception {
-    gcUpgradeDeletesTest(Ample.DataLevel.METADATA, 0);
-  }
-
-  /**
-   * Ensure that the total size of the delete candidates exceeds {@link Upgrader9to10}'s
-   * CANDIDATE_BATCH_SIZE, so that candidates are cleaned up in multiple batches without
-   * running out of memory.
-   */
-  @Test
-  public void gcUpgradeOutofMemoryTest() throws Exception {
-    killMacGc(); // we do not want anything deleted
-
-    int numberOfEntries = 100_000;
-    String longpathname = StringUtils.repeat("abcde", 100);
-    assertEquals(500, longpathname.length());
-
-    // sanity check to ensure that any batch size assumptions are still valid in this test
-    assertEquals(4_000_000, Upgrader9to10.CANDIDATE_BATCH_SIZE);
-
-    // ensure test quality by making sure we have enough candidates to
-    // exceed the batch size at least ten times
-    long numBatches = numberOfEntries * longpathname.length() / Upgrader9to10.CANDIDATE_BATCH_SIZE;
-    assertTrue(numBatches > 10 && numBatches < 15,
-        "Expected numBatches between 10 and 15, but was " + numBatches);
-
-    Ample.DataLevel level = Ample.DataLevel.USER;
-
-    log.info("Filling metadata table with lots of bogus delete flags");
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
-      Map<String,String> expected = addEntries(c, level.metaTable(), numberOfEntries, longpathname);
-      assertEquals(numberOfEntries + numberOfEntries / 10, expected.size());
-
-      Range range = DeletesSection.getRange();
-
-      sleepUninterruptibly(1, TimeUnit.SECONDS);
-      try (Scanner scanner = c.createScanner(level.metaTable(), Authorizations.EMPTY)) {
-        Map<String,String> actualOldStyle = new HashMap<>();
-        scanner.setRange(range);
-        scanner.forEach(entry -> {
-          String strKey = entry.getKey().getRow().toString();
-          String strValue = entry.getValue().toString();
-          actualOldStyle.put(strKey, strValue);
-        });
-        assertEquals(expected.size(), actualOldStyle.size());
-        assertTrue(Collections.disjoint(expected.keySet(), actualOldStyle.keySet()));
-      }
-
-      upgrader.upgradeFileDeletes(getServerContext(), level);
-
-      sleepUninterruptibly(1, TimeUnit.SECONDS);
-      try (Scanner scanner = c.createScanner(level.metaTable(), Authorizations.EMPTY)) {
-        Map<String,String> actualNewStyle = new HashMap<>();
-        scanner.setRange(range);
-        scanner.forEach(entry -> {
-          String strKey = entry.getKey().getRow().toString();
-          String expectedValue = expected.get(strKey);
-          assertNotNull(expectedValue);
-          String strValue = entry.getValue().toString();
-          assertEquals(expectedValue, strValue);
-          actualNewStyle.put(strKey, strValue);
-        });
-        assertEquals(expected.size(), actualNewStyle.size());
-        assertEquals(expected, actualNewStyle);
-      }
-    }
-  }
-
-  private void gcUpgradeDeletesTest(Ample.DataLevel level, int count) throws Exception {
-    killMacGc(); // we do not want anything deleted
-
-    log.info("Testing delete upgrades for {}", level.metaTable());
-    try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
-
-      Map<String,String> expected = addEntries(c, level.metaTable(), count, "somefile");
-
-      sleepUninterruptibly(1, TimeUnit.SECONDS);
-      upgrader.upgradeFileDeletes(getServerContext(), level);
-      sleepUninterruptibly(1, TimeUnit.SECONDS);
-      Range range = DeletesSection.getRange();
-
-      try (Scanner scanner = c.createScanner(level.metaTable(), Authorizations.EMPTY)) {
-        Map<String,String> actual = new HashMap<>();
-        scanner.setRange(range);
-        scanner.forEach(entry -> {
-          actual.put(entry.getKey().getRow().toString(), entry.getValue().toString());
-        });
-        assertEquals(expected, actual);
-      }
-
-      // ENSURE IDEMPOTENCE - run the upgrade again and verify nothing changes, since all
-      // entries have already been converted
-      upgrader.upgradeFileDeletes(getServerContext(), level);
-      try (Scanner scanner = c.createScanner(level.metaTable(), Authorizations.EMPTY)) {
-        Map<String,String> actual = new HashMap<>();
-        scanner.setRange(range);
-        scanner.forEach(entry -> {
-          actual.put(entry.getKey().getRow().toString(), entry.getValue().toString());
-        });
-        assertEquals(expected, actual);
-      }
-    }
-  }
-
-  private Mutation createOldDelMutation(String path, String cf, String cq, String val) {
-    Text row = new Text(OLDDELPREFIX + path);
-    Mutation delFlag = new Mutation(row);
-    delFlag.put(cf, cq, val);
-    return delFlag;
-  }
-
-  private Map<String,String> addEntries(AccumuloClient client, String table, int count,
-      String filename) throws Exception {
-    client.securityOperations().grantTablePermission(client.whoami(), table, TablePermission.WRITE);
-    Map<String,String> expected = new TreeMap<>();
-    try (BatchWriter bw = client.createBatchWriter(table)) {
-      for (int i = 0; i < count; ++i) {
-        String longpath =
-            String.format("hdfs://localhost:8020/accumulo/tables/5a/t-%08x/%s", i, filename);
-        Mutation delFlag = createOldDelMutation(longpath, "", "", "");
-        bw.addMutation(delFlag);
-        expected.put(DeletesSection.encodeRow(longpath), Upgrader9to10.UPGRADED.toString());
-      }
-
-      // create directory delete entries
-
-      TableId tableId = TableId.of("5a");
-
-      for (int i = 0; i < count; i += 10) {
-        String dirName = String.format("t-%08x", i);
-        String longpath =
-            String.format("hdfs://localhost:8020/accumulo/tables/%s/%s", tableId, dirName);
-        Mutation delFlag = createOldDelMutation(longpath, "", "", "");
-        bw.addMutation(delFlag);
-        expected.put(
-            DeletesSection.encodeRow(new AllVolumesDirectory(tableId, dirName).getMetadataEntry()),
-            Upgrader9to10.UPGRADED.toString());
-      }
-
-      return expected;
-    }
-  }
-
-}
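The removed GCUpgrade9to10TestIT above exercised the rewrite of old-style "~del" + path
candidate rows into the encoded DeletesSection form, with directory deletes collapsed
into single AllVolumesDirectory entries. A minimal sketch of that translation follows,
using only APIs the deleted test itself referenced (DeletesSection.encodeRow,
AllVolumesDirectory); the helper names and the literal "upgraded" value are assumptions
for illustration, standing in for the Upgrader9to10.UPGRADED value the test compared
against.

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.TableId;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.DeletesSection;
    import org.apache.accumulo.server.gc.AllVolumesDirectory;

    public class DeleteMarkerSketch {

      // Hypothetical helper: build the new-style marker for an old "~del" + path row.
      static Mutation toNewFileDeleteMarker(String path) {
        Mutation m = new Mutation(DeletesSection.encodeRow(path));
        // empty family/qualifier mirror the old-style mutations in the deleted test;
        // "upgraded" stands in for Upgrader9to10.UPGRADED.toString()
        m.put("", "", "upgraded");
        return m;
      }

      // Directory deletes are collapsed to a single all-volumes row, mirroring the
      // expected-map construction in the deleted addEntries(...)
      static String dirDeleteRow(TableId tableId, String dirName) {
        return DeletesSection
            .encodeRow(new AllVolumesDirectory(tableId, dirName).getMetadataEntry());
      }
    }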