Posted to commits@accumulo.apache.org by ct...@apache.org on 2022/01/31 10:06:58 UTC

[accumulo] branch main updated: Refactor Initialize into multiple classes (#2438)

This is an automated email from the ASF dual-hosted git repository.

ctubbsii pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 358fabd  Refactor Initialize into multiple classes (#2438)
358fabd is described below

commit 358fabde758ee3f90fbff74cc455af204baeff34
Author: Mike Miller <mm...@apache.org>
AuthorDate: Mon Jan 31 05:06:48 2022 -0500

    Refactor Initialize into multiple classes (#2438)
    
    * Create ZooKeeperInitializer
    * Create FileSystemInitializer
    * Create InitialConfiguration
    * Clean up code in Initialize by dropping redundant try/catch blocks,
    improving error handling, and passing around the new InitialConfiguration
    object to get config properties and improve readability
    * Create new methods in Initialize: checkSASL(), checkUploadProps(), resetSecurity()
    
    * Limit visibility to private or package-private where possible
    * Pass Opts.clearInstanceName as a boolean parameter instead of passing
      all of Opts, so Opts can be private
    * Rename 'go' to 'initialize' so that the name of the method performing
      the action matches the object's name
    
    Co-authored-by: Christopher Tubbs <ct...@apache.org>
---
 .../server/init/FileSystemInitializer.java         | 220 ++++++
 .../accumulo/server/init/InitialConfiguration.java | 229 ++++++
 .../apache/accumulo/server/init/Initialize.java    | 864 +++++----------------
 .../accumulo/server/init/ZooKeeperInitializer.java | 131 ++++
 .../accumulo/server/init/InitializeTest.java       |  19 +-
 5 files changed, 788 insertions(+), 675 deletions(-)
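
For orientation, the refactored Initialize.doInit (shown in full in the
Initialize.java diff below) now reads as a straight-line sequence over the new
collaborators. A condensed sketch, with variable plumbing and error handling
elided:

    // condensed flow of the refactored Initialize.doInit (sketch, not verbatim)
    checkInit(zoo, fs, initConfig);                      // ZooKeeper reachable? volumes uninitialized?
    String instanceNamePath = getInstanceNamePath(zoo, opts);
    String rootUser = getRootUserName(initConfig, opts);
    try (ServerContext context =
        ServerContext.initialize(initConfig.getSiteConf(), instanceName, uuid.toString())) {
      new ZooKeeperInitializer().initialize(zoo, opts.clearInstanceName, uuid.toString(),
          instanceNamePath, rootTabletDirName, rootTabletFileUri); // instance nodes in ZK
      createDirs(fs, uuid, initConfig.getVolumeUris());            // version + instance id dirs
      new FileSystemInitializer(initConfig, zoo, uuid)
          .initialize(fs, rootTabletDirUri, rootTabletFileUri, context); // system table dirs/files
      checkSASL(initConfig);                             // Kerberos login when SASL is enabled
      initSecurity(context, opts, rootUser);
      checkUploadProps(context, initConfig, opts);       // optional accumulo.properties upload
    }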

diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
new file mode 100644
index 0000000..4827f64
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.server.init;
+
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN;
+import static org.apache.accumulo.server.init.Initialize.REPL_TABLE_ID;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.UUID;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.apache.accumulo.core.crypto.CryptoServiceFactory;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataTime;
+import org.apache.accumulo.core.spi.crypto.CryptoService;
+import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.server.ServerContext;
+import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.util.TablePropUtil;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class FileSystemInitializer {
+  private static final String TABLE_TABLETS_TABLET_DIR = "table_info";
+  private static final Logger log = LoggerFactory.getLogger(FileSystemInitializer.class);
+
+  // the initial config, ZooKeeper writer, and instance ZK root used during init
+  private final InitialConfiguration initConfig;
+  private final ZooReaderWriter zoo;
+  private final String zkRoot;
+
+  FileSystemInitializer(InitialConfiguration initConfig, ZooReaderWriter zoo, UUID uuid) {
+    this.initConfig = initConfig;
+    this.zoo = zoo;
+    this.zkRoot = Constants.ZROOT + "/" + uuid;
+  }
+
+  private static class Tablet {
+    TableId tableId;
+    String dirName;
+    Text prevEndRow, endRow;
+    String[] files;
+
+    Tablet(TableId tableId, String dirName, Text prevEndRow, Text endRow, String... files) {
+      this.tableId = tableId;
+      this.dirName = dirName;
+      this.prevEndRow = prevEndRow;
+      this.endRow = endRow;
+      this.files = files;
+    }
+  }
+
+  void initialize(VolumeManager fs, String rootTabletDirUri, String rootTabletFileUri,
+      ServerContext context) throws IOException, InterruptedException, KeeperException {
+    SiteConfiguration siteConfig = initConfig.getSiteConf();
+    // initialize initial system tables config in zookeeper
+    initSystemTablesConfig();
+
+    Text splitPoint = MetadataSchema.TabletsSection.getRange().getEndKey().getRow();
+
+    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(
+        VolumeChooserEnvironment.Scope.INIT, MetadataTable.ID, splitPoint, context);
+    String tableMetadataTabletDirName = TABLE_TABLETS_TABLET_DIR;
+    String tableMetadataTabletDirUri =
+        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+            + MetadataTable.ID + Path.SEPARATOR + tableMetadataTabletDirName;
+    chooserEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.INIT,
+        REPL_TABLE_ID, null, context);
+    String replicationTableDefaultTabletDirName =
+        MetadataSchema.TabletsSection.ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
+    String replicationTableDefaultTabletDirUri =
+        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+            + REPL_TABLE_ID + Path.SEPARATOR + replicationTableDefaultTabletDirName;
+    chooserEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.INIT,
+        MetadataTable.ID, null, context);
+    String defaultMetadataTabletDirName =
+        MetadataSchema.TabletsSection.ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
+    String defaultMetadataTabletDirUri =
+        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+            + MetadataTable.ID + Path.SEPARATOR + defaultMetadataTabletDirName;
+
+    // create table and default tablets directories
+    createDirectories(fs, rootTabletDirUri, tableMetadataTabletDirUri, defaultMetadataTabletDirUri,
+        replicationTableDefaultTabletDirUri);
+
+    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
+
+    // populate the metadata tablet with info about the replication tablet
+    String metadataFileName = tableMetadataTabletDirUri + Path.SEPARATOR + "0_1." + ext;
+    Tablet replicationTablet =
+        new Tablet(REPL_TABLE_ID, replicationTableDefaultTabletDirName, null, null);
+    createMetadataFile(fs, metadataFileName, siteConfig, replicationTablet);
+
+    // populate the root tablet with info about the metadata table's two initial tablets
+    Tablet tablesTablet = new Tablet(MetadataTable.ID, tableMetadataTabletDirName, null, splitPoint,
+        metadataFileName);
+    Tablet defaultTablet =
+        new Tablet(MetadataTable.ID, defaultMetadataTabletDirName, splitPoint, null);
+    createMetadataFile(fs, rootTabletFileUri, siteConfig, tablesTablet, defaultTablet);
+  }
+
+  private void createDirectories(VolumeManager fs, String... dirs) throws IOException {
+    for (String s : dirs) {
+      Path dir = new Path(s);
+      try {
+        FileStatus fstat = fs.getFileStatus(dir);
+        if (!fstat.isDirectory()) {
+          log.error("FATAL: location {} exists but is not a directory", dir);
+          return;
+        }
+      } catch (FileNotFoundException fnfe) {
+        // attempt to create directory, since it doesn't exist
+        if (!fs.mkdirs(dir)) {
+          log.error("FATAL: unable to create directory {}", dir);
+          return;
+        }
+      }
+    }
+  }
+
+  private void initSystemTablesConfig() throws IOException, InterruptedException, KeeperException {
+    setTableProperties(RootTable.ID, initConfig.getRootTableConf());
+    setTableProperties(RootTable.ID, initConfig.getRootMetaConf());
+    setTableProperties(MetadataTable.ID, initConfig.getRootMetaConf());
+    setTableProperties(MetadataTable.ID, initConfig.getMetaTableConf());
+    setTableProperties(REPL_TABLE_ID, initConfig.getReplTableConf());
+  }
+
+  private void setTableProperties(TableId tableId, HashMap<String,String> props)
+      throws IOException, InterruptedException, KeeperException {
+    for (Map.Entry<String,String> entry : props.entrySet()) {
+      if (!TablePropUtil.setTableProperty(zoo, zkRoot, tableId, entry.getKey(), entry.getValue())) {
+        throw new IOException("Cannot create per-table property " + entry.getKey());
+      }
+    }
+  }
+
+  private void createMetadataFile(VolumeManager volmanager, String fileName,
+      AccumuloConfiguration conf, Tablet... tablets) throws IOException {
+    // sort file contents in memory, then play back to the file
+    TreeMap<Key,Value> sorted = new TreeMap<>();
+    for (Tablet tablet : tablets) {
+      createEntriesForTablet(sorted, tablet);
+    }
+    FileSystem fs = volmanager.getFileSystemByPath(new Path(fileName));
+
+    CryptoService cs =
+        CryptoServiceFactory.newInstance(conf, CryptoServiceFactory.ClassloaderType.ACCUMULO);
+
+    FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder()
+        .forFile(fileName, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
+    tabletWriter.startDefaultLocalityGroup();
+
+    for (Map.Entry<Key,Value> entry : sorted.entrySet()) {
+      tabletWriter.append(entry.getKey(), entry.getValue());
+    }
+
+    tabletWriter.close();
+  }
+
+  private void createEntriesForTablet(TreeMap<Key,Value> map, Tablet tablet) {
+    Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
+    Text extent = new Text(MetadataSchema.TabletsSection.encodeRow(tablet.tableId, tablet.endRow));
+    addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dirName));
+    addEntry(map, extent, TIME_COLUMN, new Value(new MetadataTime(0, TimeType.LOGICAL).encode()));
+    addEntry(map, extent, PREV_ROW_COLUMN,
+        MetadataSchema.TabletsSection.TabletColumnFamily.encodePrevEndRow(tablet.prevEndRow));
+    for (String file : tablet.files) {
+      addEntry(map, extent,
+          new ColumnFQ(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME, new Text(file)),
+          EMPTY_SIZE);
+    }
+  }
+
+  private void addEntry(TreeMap<Key,Value> map, Text row, ColumnFQ col, Value value) {
+    map.put(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
+  }
+}
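
To make createMetadataFile concrete: createEntriesForTablet emits, per tablet,
a directory column, a time column, a prev-end-row column, and one entry per
data file, all keyed by the encoded tablet row. For the metadata table's two
initial tablets the sorted entries look roughly like this (illustrative only;
the exact row and value encodings come from MetadataSchema, MetadataTime, and
DataFileValue):

    // row = tableId + ";" + endRow, or tableId + "<" when endRow is null
    !0;~  srv:dir   -> table_info        // tablets-section tablet, ends at splitPoint
    !0;~  srv:time  -> L0                // MetadataTime(0, TimeType.LOGICAL)
    !0;~  ~tab:~pr  -> <encoded null prevEndRow>
    !0;~  file:<metadataFileName> -> 0,0 // DataFileValue(0, 0)
    !0<   srv:dir   -> default_tablet    // default tablet, endRow null
    !0<   srv:time  -> L0
    !0<   ~tab:~pr  -> <encoded splitPoint as prevEndRow>

The replication table's single tablet gets an analogous trio of entries (row
+rep<), written into the 0_1 metadata file rather than the root tablet file.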
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/InitialConfiguration.java b/server/base/src/main/java/org/apache/accumulo/server/init/InitialConfiguration.java
new file mode 100644
index 0000000..a70a0bd
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/InitialConfiguration.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.server.init;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Predicate;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.apache.accumulo.core.iterators.Combiner;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
+import org.apache.accumulo.core.util.LocalityGroupUtil;
+import org.apache.accumulo.core.volume.VolumeConfiguration;
+import org.apache.accumulo.server.constraints.MetadataConstraints;
+import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
+import org.apache.accumulo.server.util.ReplicationTableUtil;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+import com.google.common.base.Joiner;
+
+class InitialConfiguration {
+
+  // config only for root table
+  private final HashMap<String,String> initialRootConf = new HashMap<>();
+  // config for root and metadata table
+  private final HashMap<String,String> initialRootMetaConf = new HashMap<>();
+  // config for only metadata table
+  private final HashMap<String,String> initialMetaConf = new HashMap<>();
+  private final HashMap<String,String> initialReplicationTableConf = new HashMap<>();
+  private final Configuration hadoopConf;
+  private final SiteConfiguration siteConf;
+
+  InitialConfiguration(Configuration hadoopConf, SiteConfiguration siteConf) {
+    this.hadoopConf = hadoopConf;
+    this.siteConf = siteConf;
+    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+        SimpleCompactionDispatcher.class.getName());
+    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "root");
+
+    initialRootMetaConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
+    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
+    initialRootMetaConf.put(Property.TABLE_DURABILITY.getKey(), "sync");
+    initialRootMetaConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
+    initialRootMetaConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
+    initialRootMetaConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1",
+        MetadataConstraints.class.getName());
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers",
+        "10," + VersioningIterator.class.getName());
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions",
+        "1");
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers",
+        "10," + VersioningIterator.class.getName());
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions",
+        "1");
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers",
+        "10," + VersioningIterator.class.getName());
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions",
+        "1");
+    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter",
+        "20," + MetadataBulkLoadFilter.class.getName());
+    initialRootMetaConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
+        String.format("%s,%s", MetadataSchema.TabletsSection.TabletColumnFamily.NAME,
+            MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME));
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server",
+        String.format("%s,%s,%s,%s", MetadataSchema.TabletsSection.DataFileColumnFamily.NAME,
+            MetadataSchema.TabletsSection.LogColumnFamily.NAME,
+            MetadataSchema.TabletsSection.ServerColumnFamily.NAME,
+            MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME));
+    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
+    initialRootMetaConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
+    initialRootMetaConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
+    initialRootMetaConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
+
+    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
+        SimpleCompactionDispatcher.class.getName());
+    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "meta");
+
+    // ACCUMULO-3077 Set the combiner on accumulo.metadata during init to reduce the likelihood
+    // of a race condition where a tserver compacts away Status updates because it didn't see
+    // the Combiner configured
+    @SuppressWarnings("deprecation")
+    var statusCombinerClass = org.apache.accumulo.server.replication.StatusCombiner.class;
+    IteratorSetting setting =
+        new IteratorSetting(9, ReplicationTableUtil.COMBINER_NAME, statusCombinerClass);
+    Combiner.setColumns(setting, Collections
+        .singletonList(new IteratorSetting.Column(MetadataSchema.ReplicationSection.COLF)));
+    for (IteratorUtil.IteratorScope scope : IteratorUtil.IteratorScope.values()) {
+      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
+          scope.name().toLowerCase(), setting.getName());
+      for (Map.Entry<String,String> prop : setting.getOptions().entrySet()) {
+        initialMetaConf.put(root + ".opt." + prop.getKey(), prop.getValue());
+      }
+      initialMetaConf.put(root, setting.getPriority() + "," + setting.getIteratorClass());
+    }
+
+    // add combiners to replication table
+    @SuppressWarnings("deprecation")
+    String replicationCombinerName =
+        org.apache.accumulo.core.replication.ReplicationTable.COMBINER_NAME;
+    setting = new IteratorSetting(30, replicationCombinerName, statusCombinerClass);
+    setting.setPriority(30);
+    @SuppressWarnings("deprecation")
+    Text statusSectionName =
+        org.apache.accumulo.core.replication.ReplicationSchema.StatusSection.NAME;
+    @SuppressWarnings("deprecation")
+    Text workSectionName = org.apache.accumulo.core.replication.ReplicationSchema.WorkSection.NAME;
+    Combiner.setColumns(setting, Arrays.asList(new IteratorSetting.Column(statusSectionName),
+        new IteratorSetting.Column(workSectionName)));
+    for (IteratorUtil.IteratorScope scope : EnumSet.allOf(IteratorUtil.IteratorScope.class)) {
+      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
+          scope.name().toLowerCase(), setting.getName());
+      for (Map.Entry<String,String> prop : setting.getOptions().entrySet()) {
+        initialReplicationTableConf.put(root + ".opt." + prop.getKey(), prop.getValue());
+      }
+      initialReplicationTableConf.put(root,
+          setting.getPriority() + "," + setting.getIteratorClass());
+    }
+    // add locality groups to replication table
+    @SuppressWarnings("deprecation")
+    Map<String,Set<Text>> replicationLocalityGroups =
+        org.apache.accumulo.core.replication.ReplicationTable.LOCALITY_GROUPS;
+    for (Map.Entry<String,Set<Text>> g : replicationLocalityGroups.entrySet()) {
+      initialReplicationTableConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX + g.getKey(),
+          LocalityGroupUtil.encodeColumnFamilies(g.getValue()));
+    }
+    initialReplicationTableConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(),
+        Joiner.on(",").join(replicationLocalityGroups.keySet()));
+    // add formatter to replication table
+    @SuppressWarnings("deprecation")
+    String replicationFormatterClassName =
+        org.apache.accumulo.server.replication.ReplicationUtil.STATUS_FORMATTER_CLASS_NAME;
+    initialReplicationTableConf.put(Property.TABLE_FORMATTER_CLASS.getKey(),
+        replicationFormatterClassName);
+
+    int max = hadoopConf.getInt("dfs.replication.max", 512);
+    // Hadoop 0.23 switched the min value configuration name
+    int min = Math.max(hadoopConf.getInt("dfs.replication.min", 1),
+        hadoopConf.getInt("dfs.namenode.replication.min", 1));
+    if (max < 5) {
+      setMetadataReplication(max, "max");
+    }
+    if (min > 5) {
+      setMetadataReplication(min, "min");
+    }
+  }
+
+  private void setMetadataReplication(int replication, String reason) {
+    String rep = System.console()
+        .readLine("Your HDFS replication " + reason + " is not compatible with our default "
+            + MetadataTable.NAME + " replication of 5. What do you want to set your "
+            + MetadataTable.NAME + " replication to? (" + replication + ") ");
+    if (rep == null || rep.isEmpty()) {
+      rep = Integer.toString(replication);
+    } else {
+      // Let's make sure it's a number
+      Integer.parseInt(rep);
+    }
+    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
+  }
+
+  HashMap<String,String> getRootTableConf() {
+    return initialRootConf;
+  }
+
+  HashMap<String,String> getRootMetaConf() {
+    return initialRootMetaConf;
+  }
+
+  HashMap<String,String> getMetaTableConf() {
+    return initialMetaConf;
+  }
+
+  HashMap<String,String> getReplTableConf() {
+    return initialReplicationTableConf;
+  }
+
+  Configuration getHadoopConf() {
+    return hadoopConf;
+  }
+
+  SiteConfiguration getSiteConf() {
+    return siteConf;
+  }
+
+  Set<String> getVolumeUris() {
+    return VolumeConfiguration.getVolumeUris(siteConf);
+  }
+
+  String get(Property property) {
+    return siteConf.get(property);
+  }
+
+  boolean getBoolean(Property property) {
+    return siteConf.getBoolean(property);
+  }
+
+  void getProperties(Map<String,String> props, Predicate<String> filter, boolean defaults) {
+    siteConf.getProperties(props, filter, defaults);
+  }
+}
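
A minimal usage sketch for the new class. How Initialize obtains the
SiteConfiguration is not visible in this diff, so SiteConfiguration.auto()
below is an assumption for illustration; the accessor calls match the file
above:

    // sketch: build the initial config once, then hand it to the initializers
    Configuration hadoopConf = new Configuration();        // org.apache.hadoop.conf.Configuration
    SiteConfiguration siteConf = SiteConfiguration.auto(); // assumption: loads accumulo.properties
    InitialConfiguration initConfig = new InitialConfiguration(hadoopConf, siteConf);

    String zkHosts = initConfig.get(Property.INSTANCE_ZK_HOST);
    boolean sasl = initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED);
    Set<String> volumes = initConfig.getVolumeUris();
    // per-table properties that FileSystemInitializer later seeds into ZooKeeper
    HashMap<String,String> rootMetaProps = initConfig.getRootMetaConf();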
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 95873ce..42b0f83 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -19,20 +19,15 @@
 package org.apache.accumulo.server.init;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN;
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN;
+import static org.apache.accumulo.core.Constants.TABLE_DIR;
+import static org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode.BAD_CREDENTIALS;
+import static org.apache.hadoop.fs.Path.SEPARATOR;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.UUID;
@@ -40,85 +35,41 @@ import java.util.UUID;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.IteratorSetting.Column;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.clientImpl.Namespace;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.accumulo.core.crypto.CryptoServiceFactory;
-import org.apache.accumulo.core.crypto.CryptoServiceFactory.ClassloaderType;
-import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.manager.state.tables.TableState;
-import org.apache.accumulo.core.manager.thrift.ManagerGoalState;
-import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
-import org.apache.accumulo.core.metadata.schema.MetadataTime;
-import org.apache.accumulo.core.metadata.schema.RootTabletMetadata;
 import org.apache.accumulo.core.singletons.SingletonManager;
 import org.apache.accumulo.core.singletons.SingletonManager.Mode;
-import org.apache.accumulo.core.spi.compaction.SimpleCompactionDispatcher;
-import org.apache.accumulo.core.spi.crypto.CryptoService;
-import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment;
 import org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.AccumuloDataVersion;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.ServerDirs;
-import org.apache.accumulo.server.constraints.MetadataConstraints;
 import org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.iterators.MetadataBulkLoadFilter;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.metadata.RootGcCandidates;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityUtil;
-import org.apache.accumulo.server.tables.TableManager;
-import org.apache.accumulo.server.util.ReplicationTableUtil;
+import org.apache.accumulo.server.util.ChangeSecret;
 import org.apache.accumulo.server.util.SystemPropUtil;
-import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.accumulo.start.spi.KeywordExecutable;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooDefs.Ids;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.beust.jcommander.Parameter;
 import com.google.auto.service.AutoService;
-import com.google.common.base.Joiner;
 
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 
@@ -132,152 +83,24 @@ public class Initialize implements KeywordExecutable {
 
   private static final Logger log = LoggerFactory.getLogger(Initialize.class);
   private static final String DEFAULT_ROOT_USER = "root";
-  private static final String TABLE_TABLETS_TABLET_DIR = "table_info";
   @SuppressWarnings("deprecation")
-  private static final TableId REPLICATION_TABLE =
-      org.apache.accumulo.core.replication.ReplicationTable.ID;
+  static final TableId REPL_TABLE_ID = org.apache.accumulo.core.replication.ReplicationTable.ID;
 
-  private static ZooReaderWriter zoo = null;
-
-  /**
-   * Sets this class's ZooKeeper reader/writer.
-   *
-   * @param zooReaderWriter
-   *          reader/writer
-   */
-  static void setZooReaderWriter(ZooReaderWriter zooReaderWriter) {
-    zoo = zooReaderWriter;
-  }
-
-  /**
-   * Gets this class's ZooKeeper reader/writer.
-   *
-   * @return reader/writer
-   */
-  static ZooReaderWriter getZooReaderWriter() {
-    return zoo;
-  }
-
-  // config only for root table
-  private static HashMap<String,String> initialRootConf = new HashMap<>();
-  // config for root and metadata table
-  private static HashMap<String,String> initialRootMetaConf = new HashMap<>();
-  // config for only metadata table
-  private static HashMap<String,String> initialMetaConf = new HashMap<>();
-  private static HashMap<String,String> initialReplicationTableConf = new HashMap<>();
-
-  static {
-    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
-        SimpleCompactionDispatcher.class.getName());
-    initialRootConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "root");
-
-    initialRootMetaConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
-    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
-    initialRootMetaConf.put(Property.TABLE_DURABILITY.getKey(), "sync");
-    initialRootMetaConf.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
-    initialRootMetaConf.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "64M");
-    initialRootMetaConf.put(Property.TABLE_CONSTRAINT_PREFIX.getKey() + "1",
-        MetadataConstraints.class.getName());
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers",
-        "10," + VersioningIterator.class.getName());
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers.opt.maxVersions",
-        "1");
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers",
-        "10," + VersioningIterator.class.getName());
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "minc.vers.opt.maxVersions",
-        "1");
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers",
-        "10," + VersioningIterator.class.getName());
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.vers.opt.maxVersions",
-        "1");
-    initialRootMetaConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter",
-        "20," + MetadataBulkLoadFilter.class.getName());
-    initialRootMetaConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
-    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
-        String.format("%s,%s", TabletColumnFamily.NAME, CurrentLocationColumnFamily.NAME));
-    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server",
-        String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME, LogColumnFamily.NAME,
-            ServerColumnFamily.NAME, FutureLocationColumnFamily.NAME));
-    initialRootMetaConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
-    initialRootMetaConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
-    initialRootMetaConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
-    initialRootMetaConf.put(Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
-
-    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER.getKey(),
-        SimpleCompactionDispatcher.class.getName());
-    initialMetaConf.put(Property.TABLE_COMPACTION_DISPATCHER_OPTS.getKey() + "service", "meta");
-
-    // ACCUMULO-3077 Set the combiner on accumulo.metadata during init to reduce the likelihood of a
-    // race condition where a tserver compacts away Status updates because it didn't see the
-    // Combiner
-    // configured
-    @SuppressWarnings("deprecation")
-    var statusCombinerClass = org.apache.accumulo.server.replication.StatusCombiner.class;
-    IteratorSetting setting =
-        new IteratorSetting(9, ReplicationTableUtil.COMBINER_NAME, statusCombinerClass);
-    Combiner.setColumns(setting, Collections.singletonList(new Column(ReplicationSection.COLF)));
-    for (IteratorScope scope : IteratorScope.values()) {
-      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
-          scope.name().toLowerCase(), setting.getName());
-      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
-        initialMetaConf.put(root + ".opt." + prop.getKey(), prop.getValue());
-      }
-      initialMetaConf.put(root, setting.getPriority() + "," + setting.getIteratorClass());
-    }
-
-    // add combiners to replication table
-    @SuppressWarnings("deprecation")
-    String replicationCombinerName =
-        org.apache.accumulo.core.replication.ReplicationTable.COMBINER_NAME;
-    setting = new IteratorSetting(30, replicationCombinerName, statusCombinerClass);
-    setting.setPriority(30);
-    @SuppressWarnings("deprecation")
-    Text statusSectionName =
-        org.apache.accumulo.core.replication.ReplicationSchema.StatusSection.NAME;
-    @SuppressWarnings("deprecation")
-    Text workSectionName = org.apache.accumulo.core.replication.ReplicationSchema.WorkSection.NAME;
-    Combiner.setColumns(setting,
-        Arrays.asList(new Column(statusSectionName), new Column(workSectionName)));
-    for (IteratorScope scope : EnumSet.allOf(IteratorScope.class)) {
-      String root = String.format("%s%s.%s", Property.TABLE_ITERATOR_PREFIX,
-          scope.name().toLowerCase(), setting.getName());
-      for (Entry<String,String> prop : setting.getOptions().entrySet()) {
-        initialReplicationTableConf.put(root + ".opt." + prop.getKey(), prop.getValue());
-      }
-      initialReplicationTableConf.put(root,
-          setting.getPriority() + "," + setting.getIteratorClass());
-    }
-    // add locality groups to replication table
-    @SuppressWarnings("deprecation")
-    Map<String,Set<Text>> replicationLocalityGroups =
-        org.apache.accumulo.core.replication.ReplicationTable.LOCALITY_GROUPS;
-    for (Entry<String,Set<Text>> g : replicationLocalityGroups.entrySet()) {
-      initialReplicationTableConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX + g.getKey(),
-          LocalityGroupUtil.encodeColumnFamilies(g.getValue()));
-    }
-    initialReplicationTableConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(),
-        Joiner.on(",").join(replicationLocalityGroups.keySet()));
-    // add formatter to replication table
-    @SuppressWarnings("deprecation")
-    String replicationFormatterClassName =
-        org.apache.accumulo.server.replication.ReplicationUtil.STATUS_FORMATTER_CLASS_NAME;
-    initialReplicationTableConf.put(Property.TABLE_FORMATTER_CLASS.getKey(),
-        replicationFormatterClassName);
-  }
-
-  static boolean checkInit(VolumeManager fs, SiteConfiguration sconf, Configuration hadoopConf)
+  static void checkInit(ZooReaderWriter zoo, VolumeManager fs, InitialConfiguration initConfig)
       throws IOException {
+    var hadoopConf = initConfig.getHadoopConf();
     log.info("Hadoop Filesystem is {}", FileSystem.getDefaultUri(hadoopConf));
-    log.info("Accumulo data dirs are {}", Arrays.asList(VolumeConfiguration.getVolumeUris(sconf)));
-    log.info("Zookeeper server is {}", sconf.get(Property.INSTANCE_ZK_HOST));
+    log.info("Accumulo data dirs are {}", List.of(initConfig.getVolumeUris()));
+    log.info("Zookeeper server is {}", initConfig.get(Property.INSTANCE_ZK_HOST));
     log.info("Checking if Zookeeper is available. If this hangs, then you need"
         + " to make sure zookeeper is running");
-    if (!zookeeperAvailable()) {
+    if (!zookeeperAvailable(zoo)) {
       // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j compatibility
-      log.error("FATAL Zookeeper needs to be up and running in order to init. Exiting ...");
-      return false;
+      throw new IllegalStateException(
+          "FATAL Zookeeper needs to be up and running in order to init. Exiting ...");
     }
-    if (sconf.get(Property.INSTANCE_SECRET).equals(Property.INSTANCE_SECRET.getDefaultValue())) {
+    if (initConfig.get(Property.INSTANCE_SECRET)
+        .equals(Property.INSTANCE_SECRET.getDefaultValue())) {
 
       System.out.println();
       System.out.println();
@@ -286,397 +109,170 @@ public class Initialize implements KeywordExecutable {
       System.out.println();
       System.out.println();
       System.out.println("You can change the instance secret in accumulo by using:");
-      System.out.println(
-          "   bin/accumulo " + org.apache.accumulo.server.util.ChangeSecret.class.getName());
+      System.out.println("   bin/accumulo " + ChangeSecret.class.getName());
       System.out.println("You will also need to edit your secret in your configuration"
           + " file by adding the property instance.secret to your"
           + " accumulo.properties. Without this accumulo will not operate" + " correctly");
     }
-    try {
-      if (isInitialized(fs, sconf)) {
-        printInitializeFailureMessages(sconf);
-        return false;
-      }
-    } catch (IOException e) {
-      throw new IOException("Failed to check if filesystem already initialized", e);
-    }
 
-    return true;
+    if (isInitialized(fs, initConfig)) {
+      printInitializeFailureMessages(initConfig);
+      throw new IOException("Filesystem is already initialized");
+    }
   }
 
-  static void printInitializeFailureMessages(SiteConfiguration sconf) {
+  private static void printInitializeFailureMessages(InitialConfiguration initConfig) {
     log.error("It appears the directories {}",
-        VolumeConfiguration.getVolumeUris(sconf) + " were previously initialized.");
+        initConfig.getVolumeUris() + " were previously initialized.");
     log.error("Change the property {} to use different volumes.",
         Property.INSTANCE_VOLUMES.getKey());
     log.error("The current value of {} is |{}|", Property.INSTANCE_VOLUMES.getKey(),
-        sconf.get(Property.INSTANCE_VOLUMES));
+        initConfig.get(Property.INSTANCE_VOLUMES));
   }
 
-  public boolean doInit(SiteConfiguration siteConfig, Opts opts, Configuration hadoopConf,
-      VolumeManager fs) throws IOException {
-    if (!checkInit(fs, siteConfig, hadoopConf)) {
-      return false;
-    }
-
-    // prompt user for instance name and root password early, in case they
-    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
+  private boolean doInit(ZooReaderWriter zoo, Opts opts, VolumeManager fs,
+      InitialConfiguration initConfig) {
     String instanceNamePath;
-    try {
-      instanceNamePath = getInstanceNamePath(opts);
-    } catch (Exception e) {
-      log.error("FATAL: Failed to talk to zookeeper", e);
-      return false;
-    }
-
+    String instanceName;
     String rootUser;
+
     try {
-      rootUser = getRootUserName(siteConfig, opts);
+      checkInit(zoo, fs, initConfig);
+
+      // prompt user for instance name and root password early, in case they
+      // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
+      instanceNamePath = getInstanceNamePath(zoo, opts);
+      rootUser = getRootUserName(initConfig, opts);
+
+      // Don't prompt for a password when we're running SASL(Kerberos)
+      if (initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
+        opts.rootpass = UUID.randomUUID().toString().getBytes(UTF_8);
+      } else {
+        opts.rootpass = getRootPassword(initConfig, opts, rootUser);
+      }
+
+      // the actual disk locations of the root table and tablets
+      instanceName = instanceNamePath.substring(getInstanceNamePrefix().length());
     } catch (Exception e) {
-      log.error("FATAL: Failed to obtain user for administrative privileges");
+      log.error("FATAL: Problem during initialize", e);
       return false;
     }
 
-    // Don't prompt for a password when we're running SASL(Kerberos)
-    if (siteConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
-      opts.rootpass = UUID.randomUUID().toString().getBytes(UTF_8);
-    } else {
-      opts.rootpass = getRootPassword(siteConfig, opts, rootUser);
-    }
-
     UUID uuid = UUID.randomUUID();
-    // the actual disk locations of the root table and tablets
-    Set<String> configuredVolumes = VolumeConfiguration.getVolumeUris(siteConfig);
-    String instanceName = instanceNamePath.substring(getInstanceNamePrefix().length());
-
     try (ServerContext context =
-        ServerContext.initialize(siteConfig, instanceName, uuid.toString())) {
-      VolumeChooserEnvironment chooserEnv =
-          new VolumeChooserEnvironmentImpl(Scope.INIT, RootTable.ID, null, context);
+        ServerContext.initialize(initConfig.getSiteConf(), instanceName, uuid.toString())) {
+      var chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, RootTable.ID, null, context);
       String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
       String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
-      String rootTabletFileUri = new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR
-          + Constants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + Path.SEPARATOR + rootTabletDirName
-          + Path.SEPARATOR + "00000_00000." + ext).toString();
-
-      try {
-        initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDirName,
-            rootTabletFileUri);
-      } catch (Exception e) {
-        log.error("FATAL: Failed to initialize zookeeper", e);
-        return false;
+      String rootTabletFileUri = new Path(
+          fs.choose(chooserEnv, initConfig.getVolumeUris()) + SEPARATOR + TABLE_DIR + SEPARATOR
+              + RootTable.ID + SEPARATOR + rootTabletDirName + SEPARATOR + "00000_00000." + ext)
+                  .toString();
+
+      ZooKeeperInitializer zki = new ZooKeeperInitializer();
+      zki.initialize(zoo, opts.clearInstanceName, uuid.toString(), instanceNamePath,
+          rootTabletDirName, rootTabletFileUri);
+      if (!createDirs(fs, uuid, initConfig.getVolumeUris())) {
+        throw new IOException("Problem creating directories on " + fs.getVolumes());
       }
+      var fileSystemInitializer = new FileSystemInitializer(initConfig, zoo, uuid);
+      var rootVol = fs.choose(chooserEnv, initConfig.getVolumeUris());
+      var rootPath =
+          new Path(rootVol + SEPARATOR + TABLE_DIR + SEPARATOR + RootTable.ID + SEPARATOR + rootTabletDirName);
+      fileSystemInitializer.initialize(fs, rootPath.toString(), rootTabletFileUri, context);
 
-      try {
-        initFileSystem(siteConfig, hadoopConf, fs, uuid,
-            new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + Constants.TABLE_DIR
-                + Path.SEPARATOR + RootTable.ID + rootTabletDirName).toString(),
-            rootTabletFileUri, context);
-      } catch (Exception e) {
-        log.error("FATAL Failed to initialize filesystem", e);
-        return false;
-      }
+      checkSASL(initConfig);
+      initSecurity(context, opts, rootUser);
 
-      // When we're using Kerberos authentication, we need valid credentials to perform
-      // initialization. If the user provided some, use them.
-      // If they did not, fall back to the credentials present in accumulo.properties that the
-      // servers will use themselves.
-      try {
-        if (siteConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
-          final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-          // We don't have any valid creds to talk to HDFS
-          if (!ugi.hasKerberosCredentials()) {
-            final String accumuloKeytab = siteConfig.get(Property.GENERAL_KERBEROS_KEYTAB),
-                accumuloPrincipal = siteConfig.get(Property.GENERAL_KERBEROS_PRINCIPAL);
-
-            // Fail if the site configuration doesn't contain appropriate credentials to login as
-            // servers
-            if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
-              log.error("FATAL: No Kerberos credentials provided, and Accumulo is"
-                  + " not properly configured for server login");
-              return false;
-            }
-
-            log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
-
-            // Login using the keytab as the 'accumulo' user
-            UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
-          }
-        }
-      } catch (IOException e) {
-        log.error("FATAL: Failed to get the Kerberos user", e);
-        return false;
-      }
-
-      try {
-        initSecurity(context, opts, rootUser);
-      } catch (Exception e) {
-        log.error("FATAL: Failed to initialize security", e);
-        return false;
-      }
-
-      if (opts.uploadAccumuloProps) {
-        try {
-          log.info("Uploading properties in accumulo.properties to Zookeeper."
-              + " Properties that cannot be set in Zookeeper will be skipped:");
-          Map<String,String> entries = new TreeMap<>();
-          siteConfig.getProperties(entries, x -> true, false);
-          for (Map.Entry<String,String> entry : entries.entrySet()) {
-            String key = entry.getKey();
-            String value = entry.getValue();
-            if (Property.isValidZooPropertyKey(key)) {
-              SystemPropUtil.setSystemProperty(context, key, value);
-              log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
-            } else {
-              log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
-            }
-          }
-        } catch (Exception e) {
-          log.error("FATAL: Failed to upload accumulo.properties to Zookeeper", e);
-          return false;
-        }
-      }
-
-      return true;
-    }
-  }
-
-  private static boolean zookeeperAvailable() {
-    try {
-      return zoo.exists("/");
-    } catch (KeeperException | InterruptedException e) {
+      checkUploadProps(context, initConfig, opts);
+    } catch (Exception e) {
+      log.error("FATAL: Problem during initialize", e);
       return false;
     }
+    return true;
   }
 
-  private static void initDirs(VolumeManager fs, UUID uuid, Set<String> baseDirs, boolean print)
-      throws IOException {
-    for (String baseDir : baseDirs) {
-      fs.mkdirs(new Path(new Path(baseDir, Constants.VERSION_DIR), "" + AccumuloDataVersion.get()),
-          new FsPermission("700"));
-
-      // create an instance id
-      Path iidLocation = new Path(baseDir, Constants.INSTANCE_ID_DIR);
-      fs.mkdirs(iidLocation);
-      fs.createNewFile(new Path(iidLocation, uuid.toString()));
-      if (print) {
-        log.info("Initialized volume {}", baseDir);
+  private void checkUploadProps(ServerContext context, InitialConfiguration initConfig, Opts opts)
+      throws InterruptedException, KeeperException {
+    if (opts.uploadAccumuloProps) {
+      log.info("Uploading properties in accumulo.properties to Zookeeper."
+          + " Properties that cannot be set in Zookeeper will be skipped:");
+      Map<String,String> entries = new TreeMap<>();
+      initConfig.getProperties(entries, x -> true, false);
+      for (Map.Entry<String,String> entry : entries.entrySet()) {
+        String key = entry.getKey();
+        String value = entry.getValue();
+        if (Property.isValidZooPropertyKey(key)) {
+          SystemPropUtil.setSystemProperty(context, key, value);
+          log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
+        } else {
+          log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
+        }
       }
     }
   }
 
-  private void initFileSystem(SiteConfiguration siteConfig, Configuration hadoopConf,
-      VolumeManager fs, UUID uuid, String rootTabletDirUri, String rootTabletFileUri,
-      ServerContext context) throws IOException {
-    initDirs(fs, uuid, VolumeConfiguration.getVolumeUris(siteConfig), false);
-
-    // initialize initial system tables config in zookeeper
-    initSystemTablesConfig(zoo, Constants.ZROOT + "/" + uuid, hadoopConf);
-
-    Text splitPoint = TabletsSection.getRange().getEndKey().getRow();
-
-    VolumeChooserEnvironment chooserEnv =
-        new VolumeChooserEnvironmentImpl(Scope.INIT, MetadataTable.ID, splitPoint, context);
-    String tableMetadataTabletDirName = TABLE_TABLETS_TABLET_DIR;
-    String tableMetadataTabletDirUri =
-        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-            + MetadataTable.ID + Path.SEPARATOR + tableMetadataTabletDirName;
-    chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, REPLICATION_TABLE, null, context);
-    String replicationTableDefaultTabletDirName = ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
-    String replicationTableDefaultTabletDirUri =
-        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-            + REPLICATION_TABLE + Path.SEPARATOR + replicationTableDefaultTabletDirName;
-    chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, MetadataTable.ID, null, context);
-    String defaultMetadataTabletDirName = ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
-    String defaultMetadataTabletDirUri =
-        fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
-            + MetadataTable.ID + Path.SEPARATOR + defaultMetadataTabletDirName;
-
-    // create table and default tablets directories
-    createDirectories(fs, rootTabletDirUri, tableMetadataTabletDirUri, defaultMetadataTabletDirUri,
-        replicationTableDefaultTabletDirUri);
-
-    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
-
-    // populate the metadata tables tablet with info about the replication table's one initial
-    // tablet
-    String metadataFileName = tableMetadataTabletDirUri + Path.SEPARATOR + "0_1." + ext;
-    Tablet replicationTablet =
-        new Tablet(REPLICATION_TABLE, replicationTableDefaultTabletDirName, null, null);
-    createMetadataFile(fs, metadataFileName, siteConfig, replicationTablet);
-
-    // populate the root tablet with info about the metadata table's two initial tablets
-    Tablet tablesTablet = new Tablet(MetadataTable.ID, tableMetadataTabletDirName, null, splitPoint,
-        metadataFileName);
-    Tablet defaultTablet =
-        new Tablet(MetadataTable.ID, defaultMetadataTabletDirName, splitPoint, null);
-    createMetadataFile(fs, rootTabletFileUri, siteConfig, tablesTablet, defaultTablet);
-  }
-
-  private static class Tablet {
-    TableId tableId;
-    String dirName;
-    Text prevEndRow, endRow;
-    String[] files;
-
-    Tablet(TableId tableId, String dirName, Text prevEndRow, Text endRow, String... files) {
-      this.tableId = tableId;
-      this.dirName = dirName;
-      this.prevEndRow = prevEndRow;
-      this.endRow = endRow;
-      this.files = files;
-    }
-  }
-
-  private static void createMetadataFile(VolumeManager volmanager, String fileName,
-      AccumuloConfiguration conf, Tablet... tablets) throws IOException {
-    // sort file contents in memory, then play back to the file
-    TreeMap<Key,Value> sorted = new TreeMap<>();
-    for (Tablet tablet : tablets) {
-      createEntriesForTablet(sorted, tablet);
-    }
-    FileSystem fs = volmanager.getFileSystemByPath(new Path(fileName));
-
-    CryptoService cs = CryptoServiceFactory.newInstance(conf, ClassloaderType.ACCUMULO);
+  /**
+   * When we're using Kerberos authentication, we need valid credentials to perform initialization.
+   * If the user provided some, use them. If they did not, fall back to the credentials present in
+   * accumulo.properties that the servers will use themselves.
+   */
+  private void checkSASL(InitialConfiguration initConfig)
+      throws IOException, AccumuloSecurityException {
+    if (initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
+      final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      // We don't have any valid creds to talk to HDFS
+      if (!ugi.hasKerberosCredentials()) {
+        final String accumuloKeytab = initConfig.get(Property.GENERAL_KERBEROS_KEYTAB),
+            accumuloPrincipal = initConfig.get(Property.GENERAL_KERBEROS_PRINCIPAL);
+
+        // Fail if the site configuration doesn't contain appropriate credentials
+        if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
+          log.error("FATAL: No Kerberos credentials provided, and Accumulo is"
+              + " not properly configured for server login");
+          throw new AccumuloSecurityException(ugi.getUserName(), BAD_CREDENTIALS);
+        }
 
-    FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder()
-        .forFile(fileName, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
-    tabletWriter.startDefaultLocalityGroup();
+        log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
 
-    for (Entry<Key,Value> entry : sorted.entrySet()) {
-      tabletWriter.append(entry.getKey(), entry.getValue());
+        // Login using the keytab as the 'accumulo' user
+        UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
+      }
     }
-
-    tabletWriter.close();
   }
 
-  private static void createEntriesForTablet(TreeMap<Key,Value> map, Tablet tablet) {
-    Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
-    Text extent = new Text(TabletsSection.encodeRow(tablet.tableId, tablet.endRow));
-    addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dirName));
-    addEntry(map, extent, TIME_COLUMN, new Value(new MetadataTime(0, TimeType.LOGICAL).encode()));
-    addEntry(map, extent, PREV_ROW_COLUMN, TabletColumnFamily.encodePrevEndRow(tablet.prevEndRow));
-    for (String file : tablet.files) {
-      addEntry(map, extent, new ColumnFQ(DataFileColumnFamily.NAME, new Text(file)), EMPTY_SIZE);
+  private static boolean zookeeperAvailable(ZooReaderWriter zoo) {
+    try {
+      return zoo.exists("/");
+    } catch (KeeperException | InterruptedException e) {
+      return false;
     }
   }
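
zookeeperAvailable() deliberately collapses every failure into false, since at this point
initialization only needs a yes/no answer. An equivalent probe with the raw ZooKeeper
client would look roughly like this (connect string and timeout are assumed placeholders):

    import org.apache.zookeeper.ZooKeeper;

    class ZkProbeSketch {
      // True only if the root znode is reachable; any error means "not available".
      static boolean canReachZooKeeper(String connectString) {
        try {
          ZooKeeper zk = new ZooKeeper(connectString, 30_000, event -> {});
          try {
            return zk.exists("/", false) != null;
          } finally {
            zk.close();
          }
        } catch (Exception e) {
          return false;
        }
      }
    }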
 
-  private static void addEntry(TreeMap<Key,Value> map, Text row, ColumnFQ col, Value value) {
-    map.put(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
-  }
-
-  private static void createDirectories(VolumeManager fs, String... dirs) throws IOException {
-    for (String s : dirs) {
-      Path dir = new Path(s);
-      try {
-        FileStatus fstat = fs.getFileStatus(dir);
-        if (!fstat.isDirectory()) {
-          log.error("FATAL: location {} exists but is not a directory", dir);
-          return;
-        }
-      } catch (FileNotFoundException fnfe) {
-        // attempt to create directory, since it doesn't exist
-        if (!fs.mkdirs(dir)) {
-          log.error("FATAL: unable to create directory {}", dir);
-          return;
-        }
+  private static boolean createDirs(VolumeManager fs, UUID uuid, Set<String> baseDirs) {
+    try {
+      for (String baseDir : baseDirs) {
+        fs.mkdirs(
+            new Path(new Path(baseDir, Constants.VERSION_DIR), "" + AccumuloDataVersion.get()),
+            new FsPermission("700"));
+        Path iidLocation = new Path(baseDir, Constants.INSTANCE_ID_DIR);
+        fs.mkdirs(iidLocation);
+        fs.createNewFile(new Path(iidLocation, uuid.toString()));
+        log.info("Created directory {}", baseDir);
       }
+      return true;
+    } catch (IOException e) {
+      log.error("Problem creating new directories", e);
+      return false;
     }
   }
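
createDirs() lays down the same skeleton on every volume: a version directory holding a
subdirectory named for the current data version (created with mode 700), and an
instance_id directory holding an empty marker file named after the instance UUID.
Assuming Constants.VERSION_DIR and Constants.INSTANCE_ID_DIR resolve to "version" and
"instance_id", the resulting layout per volume is:

    <baseDir>/version/<dataVersion>      (mode 700)
    <baseDir>/instance_id/<uuid>         (empty marker file)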
 
-  private static void initZooKeeper(Opts opts, String uuid, String instanceNamePath,
-      String rootTabletDirName, String rootTabletFileUri)
-      throws KeeperException, InterruptedException {
-    // setup basic data in zookeeper
-    zoo.putPersistentData(Constants.ZROOT, new byte[0], NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
-    zoo.putPersistentData(Constants.ZROOT + Constants.ZINSTANCES, new byte[0],
-        NodeExistsPolicy.SKIP, Ids.OPEN_ACL_UNSAFE);
-
-    // setup instance name
-    if (opts.clearInstanceName) {
-      zoo.recursiveDelete(instanceNamePath, NodeMissingPolicy.SKIP);
-    }
-    zoo.putPersistentData(instanceNamePath, uuid.getBytes(UTF_8), NodeExistsPolicy.FAIL);
-
-    final byte[] EMPTY_BYTE_ARRAY = new byte[0];
-    final byte[] ZERO_CHAR_ARRAY = {'0'};
-
-    // setup the instance
-    String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
-    zoo.putPersistentData(zkInstanceRoot, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZNAMESPACES, new byte[0],
-        NodeExistsPolicy.FAIL);
-    TableManager.prepareNewNamespaceState(zoo, uuid, Namespace.DEFAULT.id(),
-        Namespace.DEFAULT.name(), NodeExistsPolicy.FAIL);
-    TableManager.prepareNewNamespaceState(zoo, uuid, Namespace.ACCUMULO.id(),
-        Namespace.ACCUMULO.name(), NodeExistsPolicy.FAIL);
-    TableManager.prepareNewTableState(zoo, uuid, RootTable.ID, Namespace.ACCUMULO.id(),
-        RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
-    TableManager.prepareNewTableState(zoo, uuid, MetadataTable.ID, Namespace.ACCUMULO.id(),
-        MetadataTable.NAME, TableState.ONLINE, NodeExistsPolicy.FAIL);
-    @SuppressWarnings("deprecation")
-    String replicationTableName = org.apache.accumulo.core.replication.ReplicationTable.NAME;
-    TableManager.prepareNewTableState(zoo, uuid, REPLICATION_TABLE, Namespace.ACCUMULO.id(),
-        replicationTableName, TableState.OFFLINE, NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET,
-        RootTabletMetadata.getInitialJson(rootTabletDirName, rootTabletFileUri),
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_GC_CANDIDATES,
-        new RootGcCandidates().toJson().getBytes(UTF_8), NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGERS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGER_LOCK, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGER_GOAL_STATE,
-        ManagerGoalState.NORMAL.toString().getBytes(UTF_8), NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, EMPTY_BYTE_ARRAY, NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, ZERO_CHAR_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, ZERO_CHAR_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR_LOCK, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    @SuppressWarnings("deprecation")
-    String replicationZBase = org.apache.accumulo.core.replication.ReplicationConstants.ZOO_BASE;
-    zoo.putPersistentData(zkInstanceRoot + replicationZBase, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    @SuppressWarnings("deprecation")
-    String replicationZServers =
-        org.apache.accumulo.core.replication.ReplicationConstants.ZOO_TSERVERS;
-    zoo.putPersistentData(zkInstanceRoot + replicationZServers, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + WalStateManager.ZWALS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOORDINATOR, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOORDINATOR_LOCK, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOMPACTORS, EMPTY_BYTE_ARRAY,
-        NodeExistsPolicy.FAIL);
-
-  }
-
   private String getInstanceNamePrefix() {
     return Constants.ZROOT + Constants.ZINSTANCES + "/";
   }
 
-  private String getInstanceNamePath(Opts opts) throws KeeperException, InterruptedException {
+  private String getInstanceNamePath(ZooReaderWriter zoo, Opts opts)
+      throws KeeperException, InterruptedException {
     // setup the instance name
     String instanceName, instanceNamePath = null;
     boolean exists = true;
@@ -715,10 +311,10 @@ public class Initialize implements KeywordExecutable {
     return instanceNamePath;
   }
 
-  private String getRootUserName(SiteConfiguration siteConfig, Opts opts) {
-    final String keytab = siteConfig.get(Property.GENERAL_KERBEROS_KEYTAB);
+  private String getRootUserName(InitialConfiguration initConfig, Opts opts) {
+    final String keytab = initConfig.get(Property.GENERAL_KERBEROS_KEYTAB);
     if (keytab.equals(Property.GENERAL_KERBEROS_KEYTAB.getDefaultValue())
-        || !siteConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
+        || !initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
       return DEFAULT_ROOT_USER;
     }
 
@@ -741,7 +337,7 @@ public class Initialize implements KeywordExecutable {
     } while (true);
   }
 
-  private byte[] getRootPassword(SiteConfiguration siteConfig, Opts opts, String rootUser) {
+  private byte[] getRootPassword(InitialConfiguration initConfig, Opts opts, String rootUser) {
     if (opts.cliPassword != null) {
       return opts.cliPassword.getBytes(UTF_8);
     }
@@ -749,7 +345,7 @@ public class Initialize implements KeywordExecutable {
     String strconfirmpass;
     do {
       var rootpass = System.console().readPassword(
-          "Enter initial password for " + rootUser + getInitialPasswordWarning(siteConfig));
+          "Enter initial password for " + rootUser + getInitialPasswordWarning(initConfig));
       if (rootpass == null) {
         System.exit(0);
       }
@@ -777,10 +373,10 @@ public class Initialize implements KeywordExecutable {
    *
    * @return String containing warning portion of console message.
    */
-  private String getInitialPasswordWarning(SiteConfiguration siteConfig) {
+  private String getInitialPasswordWarning(InitialConfiguration initConfig) {
     String optionalWarning;
     Property authenticatorProperty = Property.INSTANCE_SECURITY_AUTHENTICATOR;
-    if (siteConfig.get(authenticatorProperty).equals(authenticatorProperty.getDefaultValue())) {
+    if (initConfig.get(authenticatorProperty).equals(authenticatorProperty.getDefaultValue())) {
       optionalWarning = ": ";
     } else {
       optionalWarning = " (this may not be applicable for your security setup): ";
@@ -794,87 +390,21 @@ public class Initialize implements KeywordExecutable {
         opts.rootpass);
   }
 
-  public static void initSystemTablesConfig(ZooReaderWriter zoo, String zooKeeperRoot,
-      Configuration hadoopConf) throws IOException {
-    try {
-      int max = hadoopConf.getInt("dfs.replication.max", 512);
-      // Hadoop 0.23 switched the min value configuration name
-      int min = Math.max(hadoopConf.getInt("dfs.replication.min", 1),
-          hadoopConf.getInt("dfs.namenode.replication.min", 1));
-      if (max < 5) {
-        setMetadataReplication(max, "max");
-      }
-      if (min > 5) {
-        setMetadataReplication(min, "min");
-      }
-
-      for (Entry<String,String> entry : initialRootConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, RootTable.ID, entry.getKey(),
-            entry.getValue())) {
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        }
-      }
-
-      for (Entry<String,String> entry : initialRootMetaConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, RootTable.ID, entry.getKey(),
-            entry.getValue())) {
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        }
-        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, MetadataTable.ID, entry.getKey(),
-            entry.getValue())) {
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        }
-      }
-
-      for (Entry<String,String> entry : initialMetaConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, MetadataTable.ID, entry.getKey(),
-            entry.getValue())) {
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        }
-      }
-
-      // add configuration to the replication table
-      for (Entry<String,String> entry : initialReplicationTableConf.entrySet()) {
-        if (!TablePropUtil.setTableProperty(zoo, zooKeeperRoot, REPLICATION_TABLE, entry.getKey(),
-            entry.getValue())) {
-          throw new IOException("Cannot create per-table property " + entry.getKey());
-        }
-      }
-    } catch (Exception e) {
-      log.error("FATAL: Error talking to ZooKeeper", e);
-      throw new IOException(e);
-    }
-  }
-
-  private static void setMetadataReplication(int replication, String reason) {
-    String rep = System.console()
-        .readLine("Your HDFS replication " + reason + " is not compatible with our default "
-            + MetadataTable.NAME + " replication of 5. What do you want to set your "
-            + MetadataTable.NAME + " replication to? (" + replication + ") ");
-    if (rep == null || rep.isEmpty()) {
-      rep = Integer.toString(replication);
-    } else {
-      // Lets make sure it's a number
-      Integer.parseInt(rep);
-    }
-    initialRootMetaConf.put(Property.TABLE_FILE_REPLICATION.getKey(), rep);
-  }
-
-  public static boolean isInitialized(VolumeManager fs, SiteConfiguration siteConfig)
+  static boolean isInitialized(VolumeManager fs, InitialConfiguration initConfig)
       throws IOException {
-    for (String baseDir : VolumeConfiguration.getVolumeUris(siteConfig)) {
+    for (String baseDir : initConfig.getVolumeUris()) {
       if (fs.exists(new Path(baseDir, Constants.INSTANCE_ID_DIR))
           || fs.exists(new Path(baseDir, Constants.VERSION_DIR))) {
         return true;
       }
     }
-
     return false;
   }
 
-  private static void addVolumes(VolumeManager fs, SiteConfiguration siteConfig,
-      Configuration hadoopConf, ServerDirs serverDirs) throws IOException {
-
+  private static boolean addVolumes(VolumeManager fs, InitialConfiguration initConfig,
+      ServerDirs serverDirs) {
+    var hadoopConf = initConfig.getHadoopConf();
+    var siteConfig = initConfig.getSiteConf();
     Set<String> volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig);
 
     Set<String> initializedDirs = serverDirs.checkBaseUris(hadoopConf, volumeURIs, true);
@@ -898,17 +428,21 @@ public class Initialize implements KeywordExecutable {
       }
     }
 
-    int persistentVersion =
-        serverDirs.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
-    if (persistentVersion != AccumuloDataVersion.get()) {
-      throw new IOException(
-          "Accumulo " + Constants.VERSION + " cannot initialize data version " + persistentVersion);
+    try {
+      int persistentVersion = serverDirs
+          .getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
+      if (persistentVersion != AccumuloDataVersion.get()) {
+        throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
+            + persistentVersion);
+      }
+    } catch (IOException e) {
+      log.error("Problem getting accumulo data version", e);
+      return false;
     }
-
-    initDirs(fs, uuid, uinitializedDirs, true);
+    return createDirs(fs, uuid, uinitializedDirs);
   }
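
Two behavioral shifts are worth noting in addVolumes(): the data-version check that used
to propagate an IOException now logs and returns false, and directory creation is
delegated to the shared createDirs() helper. The effect is that adding volumes refuses to
proceed when the persistent data version on disk does not match the version this Accumulo
build expects, i.e. you cannot add volumes while an upgrade is pending.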
 
-  static class Opts extends Help {
+  private static class Opts extends Help {
     @Parameter(names = "--add-volumes",
         description = "Initialize any uninitialized volumes listed in instance.volumes")
     boolean addVolumes = false;
@@ -953,58 +487,60 @@ public class Initialize implements KeywordExecutable {
 
   @Override
   public void execute(final String[] args) {
+    boolean success = true;
     Opts opts = new Opts();
     opts.parseArgs("accumulo init", args);
     var siteConfig = SiteConfiguration.auto();
+    ZooReaderWriter zoo = new ZooReaderWriter(siteConfig);
+    SecurityUtil.serverLogin(siteConfig);
+    Configuration hadoopConfig = new Configuration();
+    InitialConfiguration initConfig = new InitialConfiguration(hadoopConfig, siteConfig);
+    ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConfig);
+
+    try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConfig)) {
+      if (opts.resetSecurity) {
+        success = resetSecurity(initConfig, opts, fs);
+      }
+      if (success && opts.addVolumes) {
+        success = addVolumes(fs, initConfig, serverDirs);
+      }
+      if (!opts.resetSecurity && !opts.addVolumes) {
+        success = doInit(zoo, opts, fs, initConfig);
+      }
+    } catch (IOException e) {
+      log.error("Problem trying to get Volume configuration", e);
+      success = false;
+    } finally {
+      SingletonManager.setMode(Mode.CLOSED);
+      if (!success) {
+        System.exit(-1);
+      }
+    }
+  }
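
The rewritten execute() makes the three modes explicit: a security reset runs first,
--add-volumes runs only if the preceding step succeeded, and the full doInit() path runs
only when neither option was given. Every failure funnels into the single success flag,
so the process exits non-zero exactly once, from the finally block, after the
SingletonManager has been closed.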
 
-    try {
-      setZooReaderWriter(new ZooReaderWriter(siteConfig));
-      SecurityUtil.serverLogin(siteConfig);
-      Configuration hadoopConfig = new Configuration();
-      ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConfig);
-
-      try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConfig)) {
-
-        if (opts.resetSecurity) {
-          log.info("Resetting security on accumulo.");
-          try (ServerContext context = new ServerContext(siteConfig)) {
-            if (isInitialized(fs, siteConfig)) {
-              if (!opts.forceResetSecurity) {
-                String userEnteredName = System.console()
-                    .readLine("WARNING: This will remove all"
-                        + " users from Accumulo! If you wish to proceed enter the instance"
-                        + " name: ");
-                if (userEnteredName != null && !context.getInstanceName().equals(userEnteredName)) {
-                  log.error(
-                      "Aborted reset security: Instance name did not match current instance.");
-                  return;
-                }
-              }
-
-              final String rootUser = getRootUserName(siteConfig, opts);
-              opts.rootpass = getRootPassword(siteConfig, opts, rootUser);
-              initSecurity(context, opts, rootUser);
-            } else {
-              log.error("FATAL: Attempted to reset security on accumulo before it was initialized");
-            }
-          }
-        }
-
-        if (opts.addVolumes) {
-          addVolumes(fs, siteConfig, hadoopConfig, serverDirs);
-        }
-
-        if (!opts.resetSecurity && !opts.addVolumes) {
-          if (!doInit(siteConfig, opts, hadoopConfig, fs)) {
-            System.exit(-1);
-          }
+  private boolean resetSecurity(InitialConfiguration initConfig, Opts opts, VolumeManager fs) {
+    log.info("Resetting security on accumulo.");
+    try (ServerContext context = new ServerContext(initConfig.getSiteConf())) {
+      if (!isInitialized(fs, initConfig)) {
+        throw new IllegalStateException(
+            "FATAL: Attempted to reset security on accumulo before it was initialized");
+      }
+      if (!opts.forceResetSecurity) {
+        String userEnteredName = System.console().readLine("WARNING: This will remove all"
+            + " users from Accumulo! If you wish to proceed enter the instance" + " name: ");
+        if (userEnteredName != null && !context.getInstanceName().equals(userEnteredName)) {
+          throw new IllegalStateException(
+              "Aborted reset security: Instance name did not match current instance.");
         }
       }
+
+      final String rootUser = getRootUserName(initConfig, opts);
+      opts.rootpass = getRootPassword(initConfig, opts, rootUser);
+      initSecurity(context, opts, rootUser);
+      return true;
     } catch (Exception e) {
-      log.error("Fatal exception", e);
-      throw new RuntimeException(e);
-    } finally {
-      SingletonManager.setMode(Mode.CLOSED);
+      log.error("Problem calling reset security", e);
+      return false;
     }
   }
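
One caveat for the interactive prompt above: System.console() returns null when no TTY is
attached (for example, when init is scripted), so the confirmation read would fail with a
NullPointerException in that case; the forceResetSecurity option is the non-interactive
escape hatch. A guarded variant might look like this (hypothetical, not part of this
commit):

    java.io.Console console = System.console();
    if (console == null) {
      throw new IllegalStateException("resetting security requires an interactive console");
    }
    String userEnteredName = console.readLine("WARNING: ... enter the instance name: ");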
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
new file mode 100644
index 0000000..515302d
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.server.init;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.server.init.Initialize.REPL_TABLE_ID;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.clientImpl.Namespace;
+import org.apache.accumulo.core.manager.state.tables.TableState;
+import org.apache.accumulo.core.manager.thrift.ManagerGoalState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.RootTabletMetadata;
+import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooUtil;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.metadata.RootGcCandidates;
+import org.apache.accumulo.server.tables.TableManager;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+
+class ZooKeeperInitializer {
+
+  void initialize(ZooReaderWriter zoo, boolean clearInstanceName, String uuid,
+      String instanceNamePath, String rootTabletDirName, String rootTabletFileUri)
+      throws KeeperException, InterruptedException {
+    // setup basic data in zookeeper
+    zoo.putPersistentData(Constants.ZROOT, new byte[0], ZooUtil.NodeExistsPolicy.SKIP,
+        ZooDefs.Ids.OPEN_ACL_UNSAFE);
+    zoo.putPersistentData(Constants.ZROOT + Constants.ZINSTANCES, new byte[0],
+        ZooUtil.NodeExistsPolicy.SKIP, ZooDefs.Ids.OPEN_ACL_UNSAFE);
+
+    // setup instance name
+    if (clearInstanceName) {
+      zoo.recursiveDelete(instanceNamePath, ZooUtil.NodeMissingPolicy.SKIP);
+    }
+    zoo.putPersistentData(instanceNamePath, uuid.getBytes(UTF_8), ZooUtil.NodeExistsPolicy.FAIL);
+
+    final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+    final byte[] ZERO_CHAR_ARRAY = {'0'};
+
+    // setup the instance
+    String zkInstanceRoot = Constants.ZROOT + "/" + uuid;
+    zoo.putPersistentData(zkInstanceRoot, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLES, Constants.ZTABLES_INITIAL_ID,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZNAMESPACES, new byte[0],
+        ZooUtil.NodeExistsPolicy.FAIL);
+    TableManager.prepareNewNamespaceState(zoo, uuid, Namespace.DEFAULT.id(),
+        Namespace.DEFAULT.name(), ZooUtil.NodeExistsPolicy.FAIL);
+    TableManager.prepareNewNamespaceState(zoo, uuid, Namespace.ACCUMULO.id(),
+        Namespace.ACCUMULO.name(), ZooUtil.NodeExistsPolicy.FAIL);
+    TableManager.prepareNewTableState(zoo, uuid, RootTable.ID, Namespace.ACCUMULO.id(),
+        RootTable.NAME, TableState.ONLINE, ZooUtil.NodeExistsPolicy.FAIL);
+    TableManager.prepareNewTableState(zoo, uuid, MetadataTable.ID, Namespace.ACCUMULO.id(),
+        MetadataTable.NAME, TableState.ONLINE, ZooUtil.NodeExistsPolicy.FAIL);
+    @SuppressWarnings("deprecation")
+    String replicationTableName = org.apache.accumulo.core.replication.ReplicationTable.NAME;
+    TableManager.prepareNewTableState(zoo, uuid, REPL_TABLE_ID, Namespace.ACCUMULO.id(),
+        replicationTableName, TableState.OFFLINE, ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZTSERVERS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZPROBLEMS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET,
+        RootTabletMetadata.getInitialJson(rootTabletDirName, rootTabletFileUri),
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + RootTable.ZROOT_TABLET_GC_CANDIDATES,
+        new RootGcCandidates().toJson().getBytes(UTF_8), ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGERS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGER_LOCK, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZMANAGER_GOAL_STATE,
+        ManagerGoalState.NORMAL.toString().getBytes(UTF_8), ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZGC_LOCK, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZCONFIG, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZTABLE_LOCKS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZHDFS_RESERVATIONS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZNEXT_FILE, ZERO_CHAR_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZRECOVERY, ZERO_CHAR_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZMONITOR_LOCK, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    @SuppressWarnings("deprecation")
+    String replicationZBase = org.apache.accumulo.core.replication.ReplicationConstants.ZOO_BASE;
+    zoo.putPersistentData(zkInstanceRoot + replicationZBase, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    @SuppressWarnings("deprecation")
+    String replicationZServers =
+        org.apache.accumulo.core.replication.ReplicationConstants.ZOO_TSERVERS;
+    zoo.putPersistentData(zkInstanceRoot + replicationZServers, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + WalStateManager.ZWALS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOORDINATOR, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOORDINATOR_LOCK, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+    zoo.putPersistentData(zkInstanceRoot + Constants.ZCOMPACTORS, EMPTY_BYTE_ARRAY,
+        ZooUtil.NodeExistsPolicy.FAIL);
+
+  }
+
+}
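
The call site in Initialize is outside these hunks, but per the signature above it would
take roughly this shape (sketch derived only from the diff; variable names assumed):

    ZooKeeperInitializer zki = new ZooKeeperInitializer();
    zki.initialize(zoo, opts.clearInstanceName, uuid.toString(), instanceNamePath,
        rootTabletDirName, rootTabletFileUri);

The body mirrors the removed static initZooKeeper() one-for-one; the visible differences
are taking clearInstanceName as a plain boolean rather than the whole Opts object, and
referencing REPL_TABLE_ID (statically imported from Initialize) in place of the old
REPLICATION_TABLE constant.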
diff --git a/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java b/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
index 2011d41..6fc397d 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/init/InitializeTest.java
@@ -23,7 +23,6 @@ import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 
@@ -46,27 +45,25 @@ public class InitializeTest {
   private Configuration conf;
   private VolumeManager fs;
   private SiteConfiguration sconf;
-  private ZooReaderWriter zooOrig;
   private ZooReaderWriter zoo;
+  private InitialConfiguration initConfig;
 
   @Before
   public void setUp() {
     conf = new Configuration(false);
     fs = createMock(VolumeManager.class);
     sconf = createMock(SiteConfiguration.class);
+    initConfig = new InitialConfiguration(conf, sconf);
     expect(sconf.get(Property.INSTANCE_VOLUMES))
         .andReturn("hdfs://foo/accumulo,hdfs://bar/accumulo").anyTimes();
     expect(sconf.get(Property.INSTANCE_SECRET))
         .andReturn(Property.INSTANCE_SECRET.getDefaultValue()).anyTimes();
     expect(sconf.get(Property.INSTANCE_ZK_HOST)).andReturn("zk1").anyTimes();
     zoo = createMock(ZooReaderWriter.class);
-    zooOrig = Initialize.getZooReaderWriter();
-    Initialize.setZooReaderWriter(zoo);
   }
 
   @After
   public void tearDown() {
-    Initialize.setZooReaderWriter(zooOrig);
     verify(sconf, zoo, fs);
   }
 
@@ -74,7 +71,7 @@ public class InitializeTest {
   public void testIsInitialized_HasInstanceId() throws Exception {
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(sconf, zoo, fs);
-    assertTrue(Initialize.isInitialized(fs, sconf));
+    assertTrue(Initialize.isInitialized(fs, initConfig));
   }
 
   @Test
@@ -82,14 +79,14 @@ public class InitializeTest {
     expect(fs.exists(anyObject(Path.class))).andReturn(false);
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(sconf, zoo, fs);
-    assertTrue(Initialize.isInitialized(fs, sconf));
+    assertTrue(Initialize.isInitialized(fs, initConfig));
   }
 
   @Test
   public void testCheckInit_NoZK() throws Exception {
     expect(zoo.exists("/")).andReturn(false);
     replay(sconf, zoo, fs);
-    assertFalse(Initialize.checkInit(fs, sconf, conf));
+    assertThrows(IllegalStateException.class, () -> Initialize.checkInit(zoo, fs, initConfig));
   }
 
   @Test
@@ -97,7 +94,7 @@ public class InitializeTest {
     expect(zoo.exists("/")).andReturn(true);
     expect(fs.exists(anyObject(Path.class))).andReturn(true);
     replay(sconf, zoo, fs);
-    assertFalse(Initialize.checkInit(fs, sconf, conf));
+    assertThrows(IOException.class, () -> Initialize.checkInit(zoo, fs, initConfig));
   }
 
   @Test
@@ -105,7 +102,7 @@ public class InitializeTest {
     expect(zoo.exists("/")).andReturn(true);
     expect(fs.exists(anyObject(Path.class))).andThrow(new IOException());
     replay(sconf, zoo, fs);
-    assertThrows(IOException.class, () -> Initialize.checkInit(fs, sconf, conf));
+    assertThrows(IOException.class, () -> Initialize.checkInit(zoo, fs, initConfig));
   }
 
   @Test
@@ -115,6 +112,6 @@ public class InitializeTest {
     // once for instance_id, and once for version
     expect(fs.exists(anyObject(Path.class))).andReturn(false).times(4);
     replay(sconf, zoo, fs);
-    assertTrue(Initialize.checkInit(fs, sconf, conf));
+    Initialize.checkInit(zoo, fs, initConfig);
   }
 }
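
The test updates track the new contract of checkInit(): it now takes the ZooReaderWriter
explicitly and signals problems by throwing (IllegalStateException when ZooKeeper is
unreachable, IOException when a volume is already initialized or cannot be checked)
instead of returning false, so the happy-path test simply calls it and passes when
nothing is thrown. Dropping the static ZooReaderWriter also removes the setUp()/tearDown()
bookkeeping that saved and restored it around each test.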