Posted to commits@accumulo.apache.org by cs...@apache.org on 2023/05/19 17:54:04 UTC

[accumulo] branch main updated: Start using TabletFile instead of String for referencing data files (#3401)

This is an automated email from the ASF dual-hosted git repository.

cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
     new 4d933f1a28 Start using TabletFile instead of String for referencing data files (#3401)
4d933f1a28 is described below

commit 4d933f1a285fb1a5c323bf58062fb05d3110dd91
Author: Christopher L. Shannon <ch...@gmail.com>
AuthorDate: Fri May 19 13:53:59 2023 -0400

    Start using TabletFile instead of String for referencing data files (#3401)
    
    This commit moves away from using a String or Path to refer to data
    files when reading and writing, and instead uses a TabletFile so that
    range information can be more easily added in the future. Two new
    classes have been introduced: AbstractTabletFile and
    UnreferencedTabletFile. UnreferencedTabletFile represents a TabletFile
    that is not yet part of a Table/Tablet but still needs to be written or
    read, such as in the Bulk Import case, the WAL, testing, etc.
    
    Co-authored-by: Keith Turner <kt...@apache.org>
---
 .../core/client/rfile/RFileWriterBuilder.java      |  4 +-
 .../accumulo/core/clientImpl/OfflineIterator.java  |  2 +-
 .../accumulo/core/clientImpl/bulk/BulkImport.java  | 34 ++++----
 .../accumulo/core/file/BloomFilterLayer.java       | 10 ++-
 .../accumulo/core/file/DispatchingFileFactory.java |  7 +-
 .../apache/accumulo/core/file/FileOperations.java  | 66 +++++++--------
 .../accumulo/core/file/rfile/CreateEmpty.java      |  7 +-
 .../accumulo/core/file/rfile/GenerateSplits.java   | 53 ++++++------
 .../accumulo/core/file/rfile/RFileOperations.java  | 18 ++---
 .../accumulo/core/metadata/AbstractTabletFile.java | 56 +++++++++++++
 .../apache/accumulo/core/metadata/TabletFile.java  | 24 ++----
 .../core/metadata/UnreferencedTabletFile.java      | 87 ++++++++++++++++++++
 .../core/client/rfile/RFileClientTest.java         |  5 +-
 .../core/file/BloomFilterLayerLookupTest.java      | 12 ++-
 .../accumulo/core/file/FileOperationsTest.java     |  7 +-
 .../its/mapred/AccumuloFileOutputFormatIT.java     |  4 +-
 .../its/mapreduce/AccumuloFileOutputFormatIT.java  |  6 +-
 .../accumulo/server/compaction/FileCompactor.java  |  9 +--
 .../org/apache/accumulo/server/fs/FileManager.java | 93 ++++++++++------------
 .../server/init/FileSystemInitializer.java         |  6 +-
 .../org/apache/accumulo/server/util/FileUtil.java  | 62 ++++++++-------
 .../org/apache/accumulo/tserver/InMemoryMap.java   |  7 +-
 .../org/apache/accumulo/tserver/log/LogSorter.java |  3 +-
 .../accumulo/tserver/log/RecoveryLogsIterator.java | 26 +++---
 .../accumulo/tserver/tablet/CompactableUtils.java  |  4 +-
 .../org/apache/accumulo/tserver/tablet/Tablet.java |  8 +-
 .../accumulo/test/CountNameNodeOpsBulkIT.java      |  7 +-
 .../accumulo/test/GenerateSequentialRFile.java     |  7 +-
 .../java/org/apache/accumulo/test/TestIngest.java  |  7 +-
 .../apache/accumulo/test/functional/BulkNewIT.java |  4 +-
 .../test/performance/scan/CollectTabletStats.java  |  4 +-
 .../apache/accumulo/test/shell/ShellServerIT.java  |  9 ++-
 32 files changed, 417 insertions(+), 241 deletions(-)
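
For orientation before reading the diff, here is an editor's sketch, not part of the commit, of what the builder API change amounts to: forFile() now takes an AbstractTabletFile subtype (here an UnreferencedTabletFile built from a Path) instead of a String path. The class and method names come from the diff itself; the local filesystem, file path, and use of NoCryptoServiceFactory are assumptions made only for illustration.

    // Editor's sketch only -- not part of the commit. Assumes an existing
    // RFile at /tmp/example.rf on the local filesystem and no encryption.
    import org.apache.accumulo.core.conf.DefaultConfiguration;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVIterator;
    import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
    import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ForFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path path = new Path("/tmp/example.rf");

        // Before this commit the reader was keyed on a raw String path:
        //   .forFile(path.toString(), fs, conf, NoCryptoServiceFactory.NONE)

        // After this commit the path is wrapped in a TabletFile subtype
        // first, so range information can later be attached to the reference.
        UnreferencedTabletFile file = UnreferencedTabletFile.of(fs, path);
        FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
            .forFile(file, fs, conf, NoCryptoServiceFactory.NONE)
            .withTableConfiguration(DefaultConfiguration.getInstance()).build();
        reader.close();
      }
    }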

diff --git a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriterBuilder.java b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriterBuilder.java
index be1850c8c3..b1d7957338 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriterBuilder.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/rfile/RFileWriterBuilder.java
@@ -38,6 +38,7 @@ import org.apache.accumulo.core.conf.ConfigurationCopy;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.crypto.CryptoFactoryLoader;
 import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.metadata.ValidationUtil;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
@@ -119,7 +120,8 @@ class RFileWriterBuilder implements RFile.OutputArguments, RFile.WriterFSOptions
           visCacheSize);
     } else {
       return new RFileWriter(fileops.newWriterBuilder()
-          .forFile(out.path.toString(), out.getFileSystem(), out.getConf(), cs)
+          .forFile(UnreferencedTabletFile.of(out.getFileSystem(), out.path), out.getFileSystem(),
+              out.getConf(), cs)
           .withTableConfiguration(acuconf).withStartDisabled().build(), visCacheSize);
     }
   }
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
index 6759a5e51c..7fff98cc0c 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/OfflineIterator.java
@@ -300,7 +300,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
       var cs = CryptoFactoryLoader.getServiceForClientWithTable(systemConf, tableConf, tableId);
       FileSystem fs = VolumeConfiguration.fileSystemForPath(file.getPathStr(), conf);
       FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-          .forFile(file.getPathStr(), fs, conf, cs).withTableConfiguration(tableCC).build();
+          .forFile(file, fs, conf, cs).withTableConfiguration(tableCC).build();
       if (scannerSamplerConfigImpl != null) {
         reader = reader.getSample(scannerSamplerConfigImpl);
         if (reader == null) {
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
index 2ccc2ad3aa..dec10d0335 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
@@ -74,6 +74,7 @@ import org.apache.accumulo.core.data.TableId;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.core.util.Retry;
 import org.apache.accumulo.core.volume.VolumeConfiguration;
@@ -261,9 +262,9 @@ public class BulkImport implements ImportDestinationArguments, ImportMappingOpti
     long l;
   }
 
-  public static Map<KeyExtent,Long> estimateSizes(AccumuloConfiguration acuConf, Path dataFile,
-      long fileSize, Collection<KeyExtent> extents, FileSystem ns, Cache<String,Long> fileLenCache,
-      CryptoService cs) throws IOException {
+  public static Map<KeyExtent,Long> estimateSizes(AccumuloConfiguration acuConf,
+      UnreferencedTabletFile dataFile, long fileSize, Collection<KeyExtent> extents, FileSystem ns,
+      Cache<String,Long> fileLenCache, CryptoService cs) throws IOException {
 
     if (extents.size() == 1) {
       return Collections.singletonMap(extents.iterator().next(), fileSize);
@@ -277,9 +278,9 @@ public class BulkImport implements ImportDestinationArguments, ImportMappingOpti
 
     Text row = new Text();
 
-    FileSKVIterator index = FileOperations.getInstance().newIndexReaderBuilder()
-        .forFile(dataFile.toString(), ns, ns.getConf(), cs).withTableConfiguration(acuConf)
-        .withFileLenCache(fileLenCache).build();
+    FileSKVIterator index =
+        FileOperations.getInstance().newIndexReaderBuilder().forFile(dataFile, ns, ns.getConf(), cs)
+            .withTableConfiguration(acuConf).withFileLenCache(fileLenCache).build();
 
     try {
       while (index.hasTop()) {
@@ -354,12 +355,11 @@ public class BulkImport implements ImportDestinationArguments, ImportMappingOpti
   }
 
   public static List<KeyExtent> findOverlappingTablets(ClientContext context,
-      KeyExtentCache extentCache, Path file, FileSystem fs, Cache<String,Long> fileLenCache,
-      CryptoService cs) throws IOException {
+      KeyExtentCache extentCache, UnreferencedTabletFile file, FileSystem fs,
+      Cache<String,Long> fileLenCache, CryptoService cs) throws IOException {
     try (FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-        .forFile(file.toString(), fs, fs.getConf(), cs)
-        .withTableConfiguration(context.getConfiguration()).withFileLenCache(fileLenCache)
-        .seekToBeginning().build()) {
+        .forFile(file, fs, fs.getConf(), cs).withTableConfiguration(context.getConfiguration())
+        .withFileLenCache(fileLenCache).seekToBeginning().build()) {
       return findOverlappingTablets(extentCache, reader);
     }
   }
@@ -542,22 +542,22 @@ public class BulkImport implements ImportDestinationArguments, ImportMappingOpti
         context.instanceOperations().getSystemConfiguration(), tableProps, tableId);
 
     for (FileStatus fileStatus : files) {
-      Path filePath = fileStatus.getPath();
+      UnreferencedTabletFile file = UnreferencedTabletFile.of(fs, fileStatus.getPath());
       CompletableFuture<Map<KeyExtent,Bulk.FileInfo>> future = CompletableFuture.supplyAsync(() -> {
         try {
           long t1 = System.currentTimeMillis();
           List<KeyExtent> extents =
-              findOverlappingTablets(context, extentCache, filePath, fs, fileLensCache, cs);
+              findOverlappingTablets(context, extentCache, file, fs, fileLensCache, cs);
           // make sure file isn't going to too many tablets
-          checkTabletCount(maxTablets, extents.size(), filePath.toString());
-          Map<KeyExtent,Long> estSizes = estimateSizes(context.getConfiguration(), filePath,
+          checkTabletCount(maxTablets, extents.size(), file.toString());
+          Map<KeyExtent,Long> estSizes = estimateSizes(context.getConfiguration(), file,
               fileStatus.getLen(), extents, fs, fileLensCache, cs);
           Map<KeyExtent,Bulk.FileInfo> pathLocations = new HashMap<>();
           for (KeyExtent ke : extents) {
-            pathLocations.put(ke, new Bulk.FileInfo(filePath, estSizes.getOrDefault(ke, 0L)));
+            pathLocations.put(ke, new Bulk.FileInfo(file.getPath(), estSizes.getOrDefault(ke, 0L)));
           }
           long t2 = System.currentTimeMillis();
-          log.debug("Mapped {} to {} tablets in {}ms", filePath, pathLocations.size(), t2 - t1);
+          log.debug("Mapped {} to {} tablets in {}ms", file, pathLocations.size(), t2 - t1);
           return pathLocations;
         } catch (Exception e) {
           throw new CompletionException(e);
diff --git a/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java b/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
index 59af8025f5..414c11588a 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/BloomFilterLayer.java
@@ -49,11 +49,13 @@ import org.apache.accumulo.core.file.keyfunctor.KeyFunctor;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.accumulo.core.util.threads.ThreadPools;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
@@ -475,8 +477,8 @@ public class BloomFilterLayer {
     String suffix = FileOperations.getNewFileExtension(acuconf);
     String fname = "/tmp/test." + suffix;
     FileSKVWriter bmfw = FileOperations.getInstance().newWriterBuilder()
-        .forFile(fname, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(acuconf)
-        .build();
+        .forFile(new TabletFile(new Path(fname)), fs, conf, NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(acuconf).build();
 
     long t1 = System.currentTimeMillis();
 
@@ -498,8 +500,8 @@ public class BloomFilterLayer {
 
     t1 = System.currentTimeMillis();
     FileSKVIterator bmfr = FileOperations.getInstance().newReaderBuilder()
-        .forFile(fname, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(acuconf)
-        .build();
+        .forFile(new TabletFile(new Path(fname)), fs, conf, NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(acuconf).build();
     t2 = System.currentTimeMillis();
     out.println("Opened " + fname + " in " + (t2 - t1));
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
index bae4c433af..3620f9b56c 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/DispatchingFileFactory.java
@@ -23,16 +23,15 @@ import java.io.IOException;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
+import org.apache.accumulo.core.metadata.AbstractTabletFile;
 import org.apache.accumulo.core.summary.SummaryWriter;
-import org.apache.hadoop.fs.Path;
 
 class DispatchingFileFactory extends FileOperations {
 
   private FileOperations findFileFactory(FileOptions options) {
-    String file = options.getFilename();
+    AbstractTabletFile<?> file = options.getFile();
 
-    Path p = new Path(file);
-    String name = p.getName();
+    String name = file.getPath().getName();
 
     String[] sp = name.split("\\.");
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index db65923641..7dad3a3127 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -31,11 +31,14 @@ import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.AbstractTabletFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.core.util.ratelimit.RateLimiter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.FileOutputCommitter;
 
 import com.github.benmanes.caffeine.cache.Cache;
@@ -162,10 +165,10 @@ public abstract class FileOperations {
     return new ReaderBuilder();
   }
 
-  public static class FileOptions {
+  protected static class FileOptions {
     // objects used by all
     public final AccumuloConfiguration tableConfiguration;
-    public final String filename;
+    public final AbstractTabletFile<?> file;
     public final FileSystem fs;
     public final Configuration fsConf;
     public final RateLimiter rateLimiter;
@@ -184,13 +187,13 @@ public abstract class FileOperations {
     public final boolean inclusive;
     public final boolean dropCacheBehind;
 
-    public FileOptions(AccumuloConfiguration tableConfiguration, String filename, FileSystem fs,
-        Configuration fsConf, RateLimiter rateLimiter, String compression,
+    protected FileOptions(AccumuloConfiguration tableConfiguration, AbstractTabletFile<?> file,
+        FileSystem fs, Configuration fsConf, RateLimiter rateLimiter, String compression,
         FSDataOutputStream outputStream, boolean enableAccumuloStart, CacheProvider cacheProvider,
         Cache<String,Long> fileLenCache, boolean seekToBeginning, CryptoService cryptoService,
         Range range, Set<ByteSequence> columnFamilies, boolean inclusive, boolean dropCacheBehind) {
       this.tableConfiguration = tableConfiguration;
-      this.filename = filename;
+      this.file = Objects.requireNonNull(file);
       this.fs = fs;
       this.fsConf = fsConf;
       this.rateLimiter = rateLimiter;
@@ -211,8 +214,8 @@ public abstract class FileOperations {
       return tableConfiguration;
     }
 
-    public String getFilename() {
-      return filename;
+    public AbstractTabletFile<?> getFile() {
+      return file;
     }
 
     public FileSystem getFileSystem() {
@@ -273,7 +276,7 @@ public abstract class FileOperations {
    */
   public static class FileHelper {
     private AccumuloConfiguration tableConfiguration;
-    private String filename;
+    private AbstractTabletFile<?> file;
     private FileSystem fs;
     private Configuration fsConf;
     private RateLimiter rateLimiter;
@@ -290,8 +293,8 @@ public abstract class FileOperations {
       return this;
     }
 
-    protected FileHelper filename(String filename) {
-      this.filename = Objects.requireNonNull(filename);
+    protected FileHelper file(AbstractTabletFile<?> file) {
+      this.file = Objects.requireNonNull(file);
       return this;
     }
 
@@ -317,28 +320,27 @@ public abstract class FileOperations {
 
     protected FileOptions toWriterBuilderOptions(String compression,
         FSDataOutputStream outputStream, boolean startEnabled) {
-      return new FileOptions(tableConfiguration, filename, fs, fsConf, rateLimiter, compression,
+      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, compression,
           outputStream, startEnabled, NULL_PROVIDER, null, false, cryptoService, null, null, true,
           dropCacheBehind);
     }
 
     protected FileOptions toReaderBuilderOptions(CacheProvider cacheProvider,
         Cache<String,Long> fileLenCache, boolean seekToBeginning) {
-      return new FileOptions(tableConfiguration, filename, fs, fsConf, rateLimiter, null, null,
-          false, cacheProvider == null ? NULL_PROVIDER : cacheProvider, fileLenCache,
-          seekToBeginning, cryptoService, null, null, true, dropCacheBehind);
+      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
+          cacheProvider == null ? NULL_PROVIDER : cacheProvider, fileLenCache, seekToBeginning,
+          cryptoService, null, null, true, dropCacheBehind);
     }
 
     protected FileOptions toIndexReaderBuilderOptions(Cache<String,Long> fileLenCache) {
-      return new FileOptions(tableConfiguration, filename, fs, fsConf, rateLimiter, null, null,
-          false, NULL_PROVIDER, fileLenCache, false, cryptoService, null, null, true,
-          dropCacheBehind);
+      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
+          NULL_PROVIDER, fileLenCache, false, cryptoService, null, null, true, dropCacheBehind);
     }
 
     protected FileOptions toScanReaderBuilderOptions(Range range, Set<ByteSequence> columnFamilies,
         boolean inclusive) {
-      return new FileOptions(tableConfiguration, filename, fs, fsConf, rateLimiter, null, null,
-          false, NULL_PROVIDER, null, false, cryptoService, range, columnFamilies, inclusive,
+      return new FileOptions(tableConfiguration, file, fs, fsConf, rateLimiter, null, null, false,
+          NULL_PROVIDER, null, false, cryptoService, range, columnFamilies, inclusive,
           dropCacheBehind);
     }
 
@@ -356,15 +358,17 @@ public abstract class FileOperations {
     private boolean enableAccumuloStart = true;
 
     public WriterTableConfiguration forOutputStream(String extension,
-        FSDataOutputStream outputStream, Configuration fsConf, CryptoService cs) {
+        FSDataOutputStream outputStream, Configuration fsConf, CryptoService cs)
+        throws IOException {
       this.outputStream = outputStream;
-      filename("foo" + extension).fsConf(fsConf).cryptoService(cs);
+      file(UnreferencedTabletFile.of(fsConf, new Path("foo/foo" + extension))).fsConf(fsConf)
+          .cryptoService(cs);
       return this;
     }
 
-    public WriterTableConfiguration forFile(String filename, FileSystem fs, Configuration fsConf,
-        CryptoService cs) {
-      filename(filename).fs(fs).fsConf(fsConf).cryptoService(cs);
+    public WriterTableConfiguration forFile(AbstractTabletFile<?> file, FileSystem fs,
+        Configuration fsConf, CryptoService cs) {
+      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
       return this;
     }
 
@@ -411,9 +415,9 @@ public abstract class FileOperations {
     private Cache<String,Long> fileLenCache;
     private boolean seekToBeginning = false;
 
-    public ReaderTableConfiguration forFile(String filename, FileSystem fs, Configuration fsConf,
-        CryptoService cs) {
-      filename(filename).fs(fs).fsConf(fsConf).cryptoService(cs);
+    public ReaderTableConfiguration forFile(AbstractTabletFile<?> file, FileSystem fs,
+        Configuration fsConf, CryptoService cs) {
+      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
       return this;
     }
 
@@ -479,9 +483,9 @@ public abstract class FileOperations {
 
     private Cache<String,Long> fileLenCache = null;
 
-    public IndexReaderTableConfiguration forFile(String filename, FileSystem fs,
+    public IndexReaderTableConfiguration forFile(AbstractTabletFile<?> file, FileSystem fs,
         Configuration fsConf, CryptoService cs) {
-      filename(filename).fs(fs).fsConf(fsConf).cryptoService(cs);
+      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
       return this;
     }
 
@@ -511,9 +515,9 @@ public abstract class FileOperations {
     private Set<ByteSequence> columnFamilies;
     private boolean inclusive;
 
-    public ScanReaderTableConfiguration forFile(String filename, FileSystem fs,
+    public ScanReaderTableConfiguration forFile(AbstractTabletFile<?> file, FileSystem fs,
         Configuration fsConf, CryptoService cs) {
-      filename(filename).fs(fs).fsConf(fsConf).cryptoService(cs);
+      file(file).fs(fs).fsConf(fsConf).cryptoService(cs);
       return this;
     }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
index f7d7eb9acd..5a8d4dc104 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/CreateEmpty.java
@@ -25,6 +25,7 @@ import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.bcfile.Compression;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.accumulo.core.spi.file.rfile.compression.NoCompression;
 import org.apache.accumulo.start.spi.KeywordExecutable;
@@ -99,10 +100,10 @@ public class CreateEmpty implements KeywordExecutable {
     opts.parseArgs("accumulo create-empty", args);
 
     for (String arg : opts.files) {
-      Path path = new Path(arg);
-      log.info("Writing to file '{}'", path);
+      UnreferencedTabletFile file = UnreferencedTabletFile.of(conf, new Path(arg));
+      log.info("Writing to file '{}'", file);
       FileSKVWriter writer = new RFileOperations().newWriterBuilder()
-          .forFile(arg, path.getFileSystem(conf), conf, NoCryptoServiceFactory.NONE)
+          .forFile(file, file.getPath().getFileSystem(conf), conf, NoCryptoServiceFactory.NONE)
           .withTableConfiguration(DefaultConfiguration.getInstance()).withCompression(opts.codec)
           .build();
       writer.close();
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
index a21d5a37d2..a4a4accbbe 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
@@ -46,6 +46,7 @@ import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.core.util.TextUtil;
@@ -129,34 +130,33 @@ public class GenerateSplits implements KeywordExecutable {
     long splitSize = opts.splitSize;
 
     FileSystem fs = FileSystem.get(hadoopConf);
-    List<Path> filePaths = new ArrayList<>();
+    List<UnreferencedTabletFile> files = new ArrayList<>();
     for (String file : opts.files) {
       Path path = new Path(file);
       fs = PrintInfo.resolveFS(log, hadoopConf, path);
       // get all the files in the directory
-      filePaths.addAll(getFiles(fs, path));
+      files.addAll(getFiles(fs, path));
     }
 
-    if (filePaths.isEmpty()) {
+    if (files.isEmpty()) {
       throw new IllegalArgumentException("No files were found in " + opts.files);
     } else {
-      log.trace("Found the following files: {}", filePaths);
+      log.trace("Found the following files: {}", files);
     }
 
     // if no size specified look at indexed keys first
     if (opts.splitSize == 0) {
-      splits = getIndexKeys(siteConf, hadoopConf, fs, filePaths, requestedNumSplits, encode,
-          cryptoService);
+      splits =
+          getIndexKeys(siteConf, hadoopConf, fs, files, requestedNumSplits, encode, cryptoService);
       // if there weren't enough splits indexed, try again with size = 0
       if (splits.size() < requestedNumSplits) {
         log.info("Only found {} indexed keys but need {}. Doing a full scan on files {}",
-            splits.size(), requestedNumSplits, filePaths);
-        splits = getSplitsFromFullScan(siteConf, hadoopConf, filePaths, fs, requestedNumSplits,
-            encode, cryptoService);
+            splits.size(), requestedNumSplits, files);
+        splits = getSplitsFromFullScan(siteConf, hadoopConf, files, fs, requestedNumSplits, encode,
+            cryptoService);
       }
     } else {
-      splits =
-          getSplitsBySize(siteConf, hadoopConf, filePaths, fs, splitSize, encode, cryptoService);
+      splits = getSplitsBySize(siteConf, hadoopConf, files, fs, splitSize, encode, cryptoService);
     }
 
     TreeSet<String> desiredSplits;
@@ -182,20 +182,20 @@ public class GenerateSplits implements KeywordExecutable {
     }
   }
 
-  private List<Path> getFiles(FileSystem fs, Path path) throws IOException {
-    List<Path> filePaths = new ArrayList<>();
+  private List<UnreferencedTabletFile> getFiles(FileSystem fs, Path path) throws IOException {
+    List<UnreferencedTabletFile> files = new ArrayList<>();
     if (fs.getFileStatus(path).isDirectory()) {
       var iter = fs.listFiles(path, true);
       while (iter.hasNext()) {
-        filePaths.addAll(getFiles(fs, iter.next().getPath()));
+        files.addAll(getFiles(fs, iter.next().getPath()));
       }
     } else {
       if (!path.toString().endsWith(".rf")) {
         throw new IllegalArgumentException("Provided file (" + path + ") does not end with '.rf'");
       }
-      filePaths.add(path);
+      files.add(UnreferencedTabletFile.of(fs, path));
     }
-    return filePaths;
+    return files;
   }
 
   private Text[] getQuantiles(SortedKeyValueIterator<Key,Value> iterator, int numSplits)
@@ -267,16 +267,15 @@ public class GenerateSplits implements KeywordExecutable {
    * Scan the files for indexed keys first since it is more efficient than a full file scan.
    */
   private TreeSet<String> getIndexKeys(AccumuloConfiguration accumuloConf, Configuration hadoopConf,
-      FileSystem fs, List<Path> files, int requestedNumSplits, boolean base64encode,
-      CryptoService cs) throws IOException {
+      FileSystem fs, List<UnreferencedTabletFile> files, int requestedNumSplits,
+      boolean base64encode, CryptoService cs) throws IOException {
     Text[] splitArray;
     List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<>(files.size());
     List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
     try {
-      for (Path file : files) {
+      for (UnreferencedTabletFile file : files) {
         FileSKVIterator reader = FileOperations.getInstance().newIndexReaderBuilder()
-            .forFile(file.toString(), fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
-            .build();
+            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf).build();
         readers.add(reader);
         fileReaders.add(reader);
       }
@@ -294,7 +293,7 @@ public class GenerateSplits implements KeywordExecutable {
   }
 
   private TreeSet<String> getSplitsFromFullScan(SiteConfiguration accumuloConf,
-      Configuration hadoopConf, List<Path> files, FileSystem fs, int numSplits,
+      Configuration hadoopConf, List<UnreferencedTabletFile> files, FileSystem fs, int numSplits,
       boolean base64encode, CryptoService cs) throws IOException {
     Text[] splitArray;
     List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
@@ -302,9 +301,9 @@ public class GenerateSplits implements KeywordExecutable {
     SortedKeyValueIterator<Key,Value> iterator;
 
     try {
-      for (Path file : files) {
+      for (UnreferencedTabletFile file : files) {
         FileSKVIterator reader = FileOperations.getInstance().newScanReaderBuilder()
-            .forFile(file.toString(), fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
+            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
             .overRange(new Range(), Set.of(), false).build();
         readers.add(reader);
         fileReaders.add(reader);
@@ -327,7 +326,7 @@ public class GenerateSplits implements KeywordExecutable {
    * Get number of splits based on requested size of split.
    */
   private TreeSet<String> getSplitsBySize(AccumuloConfiguration accumuloConf,
-      Configuration hadoopConf, List<Path> files, FileSystem fs, long splitSize,
+      Configuration hadoopConf, List<UnreferencedTabletFile> files, FileSystem fs, long splitSize,
       boolean base64encode, CryptoService cs) throws IOException {
     long currentSplitSize = 0;
     long totalSize = 0;
@@ -336,9 +335,9 @@ public class GenerateSplits implements KeywordExecutable {
     List<SortedKeyValueIterator<Key,Value>> readers = new ArrayList<>(files.size());
     SortedKeyValueIterator<Key,Value> iterator;
     try {
-      for (Path file : files) {
+      for (UnreferencedTabletFile file : files) {
         FileSKVIterator reader = FileOperations.getInstance().newScanReaderBuilder()
-            .forFile(file.toString(), fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
+            .forFile(file, fs, hadoopConf, cs).withTableConfiguration(accumuloConf)
             .overRange(new Range(), Set.of(), false).build();
         readers.add(reader);
         fileReaders.add(reader);
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
index 609c19550e..ac037d66e7 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
@@ -34,13 +34,13 @@ import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
 import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
+import org.apache.accumulo.core.metadata.AbstractTabletFile;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.sample.impl.SamplerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,7 +55,7 @@ public class RFileOperations extends FileOperations {
 
   private static RFile.Reader getReader(FileOptions options) throws IOException {
     CachableBuilder cb = new CachableBuilder()
-        .fsPath(options.getFileSystem(), new Path(options.getFilename()), options.dropCacheBehind)
+        .fsPath(options.getFileSystem(), options.getFile().getPath(), options.dropCacheBehind)
         .conf(options.getConfiguration()).fileLen(options.getFileLenCache())
         .cacheProvider(options.cacheProvider).readLimiter(options.getRateLimiter())
         .cryptoService(options.getCryptoService());
@@ -64,7 +64,7 @@ public class RFileOperations extends FileOperations {
 
   @Override
   protected long getFileSize(FileOptions options) throws IOException {
-    return options.getFileSystem().getFileStatus(new Path(options.getFilename())).getLen();
+    return options.getFileSystem().getFileStatus(options.getFile().getPath()).getLen();
   }
 
   @Override
@@ -133,25 +133,25 @@ public class RFileOperations extends FileOperations {
       }
       int bufferSize = conf.getInt("io.file.buffer.size", 4096);
 
-      String file = options.getFilename();
+      AbstractTabletFile<?> file = options.getFile();
       FileSystem fs = options.getFileSystem();
 
       if (options.dropCacheBehind) {
         EnumSet<CreateFlag> set = EnumSet.of(CreateFlag.SYNC_BLOCK, CreateFlag.CREATE);
-        outputStream = fs.create(new Path(file), FsPermission.getDefault(), set, bufferSize,
+        outputStream = fs.create(file.getPath(), FsPermission.getDefault(), set, bufferSize,
             (short) rep, block, null);
         try {
           // Tell the DataNode that the file does not need to be cached in the OS page cache
           outputStream.setDropBehind(Boolean.TRUE);
-          LOG.trace("Called setDropBehind(TRUE) for stream writing file {}", options.filename);
+          LOG.trace("Called setDropBehind(TRUE) for stream writing file {}", options.file);
         } catch (UnsupportedOperationException e) {
-          LOG.debug("setDropBehind not enabled for file: {}", options.filename);
+          LOG.debug("setDropBehind not enabled for file: {}", options.file);
         } catch (IOException e) {
-          LOG.debug("IOException setting drop behind for file: {}, msg: {}", options.filename,
+          LOG.debug("IOException setting drop behind for file: {}, msg: {}", options.file,
               e.getMessage());
         }
       } else {
-        outputStream = fs.create(new Path(file), false, bufferSize, (short) rep, block);
+        outputStream = fs.create(file.getPath(), false, bufferSize, (short) rep, block);
       }
     }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java b/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
new file mode 100644
index 0000000000..657c6d0c08
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.Objects;
+
+import org.apache.hadoop.fs.Path;
+
+/**
+ * A base class used to represent file references that are handled by code that processes tablet
+ * files.
+ *
+ * @since 3.0.0
+ */
+public abstract class AbstractTabletFile<T extends AbstractTabletFile<T>> implements Comparable<T> {
+
+  private final String fileName; // C0004.rf
+  protected final Path path;
+
+  protected AbstractTabletFile(Path path) {
+    this.path = Objects.requireNonNull(path);
+    this.fileName = path.getName();
+    ValidationUtil.validateFileName(fileName);
+  }
+
+  /**
+   * @return The file name of the TabletFile
+   */
+  public String getFileName() {
+    return fileName;
+  }
+
+  /**
+   * @return The path of the TabletFile
+   */
+  public Path getPath() {
+    return path;
+  }
+
+}
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/TabletFile.java b/core/src/main/java/org/apache/accumulo/core/metadata/TabletFile.java
index de974ca687..c0a32c6671 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/TabletFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/TabletFile.java
@@ -40,11 +40,9 @@ import com.google.common.base.Preconditions;
  * As of 2.1, Tablet file paths should now be only absolute URIs with the removal of relative paths
  * in Upgrader9to10.upgradeRelativePaths()
  */
-public class TabletFile implements Comparable<TabletFile> {
+public class TabletFile extends AbstractTabletFile<TabletFile> {
   // parts of an absolute URI, like "hdfs://1.2.3.4/accumulo/tables/2a/t-0003/C0004.rf"
   private final TabletDirectory tabletDir; // hdfs://1.2.3.4/accumulo/tables/2a/t-0003
-  private final String fileName; // C0004.rf
-  protected final Path metaPath;
   private final String normalizedPath;
 
   private static final Logger log = LoggerFactory.getLogger(TabletFile.class);
@@ -54,14 +52,11 @@ public class TabletFile implements Comparable<TabletFile> {
    * qualify an absolute path or create a new file.
    */
   public TabletFile(Path metaPath) {
-    this.metaPath = Objects.requireNonNull(metaPath);
+    super(Objects.requireNonNull(metaPath));
     String errorMsg = "Missing or invalid part of tablet file metadata entry: " + metaPath;
     log.trace("Parsing TabletFile from {}", metaPath);
 
     // use Path object to step backwards from the filename through all the parts
-    this.fileName = metaPath.getName();
-    ValidationUtil.validateFileName(fileName);
-
     Path tabletDirPath = Objects.requireNonNull(metaPath.getParent(), errorMsg);
 
     Path tableIdPath = Objects.requireNonNull(tabletDirPath.getParent(), errorMsg);
@@ -76,7 +71,7 @@ public class TabletFile implements Comparable<TabletFile> {
     var volume = volumePath.toString();
 
     this.tabletDir = new TabletDirectory(volume, TableId.of(id), tabletDirPath.getName());
-    this.normalizedPath = tabletDir.getNormalizedPath() + "/" + fileName;
+    this.normalizedPath = tabletDir.getNormalizedPath() + "/" + getFileName();
   }
 
   public String getVolume() {
@@ -91,10 +86,6 @@ public class TabletFile implements Comparable<TabletFile> {
     return tabletDir.getTabletDir();
   }
 
-  public String getFileName() {
-    return fileName;
-  }
-
   /**
    * Return a string for opening and reading the tablet file. Doesn't have to be exact string in
    * metadata.
@@ -124,10 +115,6 @@ public class TabletFile implements Comparable<TabletFile> {
     return new StoredTabletFile(normalizedPath);
   }
 
-  public Path getPath() {
-    return metaPath;
-  }
-
   @Override
   public int compareTo(TabletFile o) {
     if (equals(o)) {
@@ -155,4 +142,9 @@ public class TabletFile implements Comparable<TabletFile> {
   public String toString() {
     return normalizedPath;
   }
+
+  public static TabletFile of(final Path path) {
+    return new TabletFile(path);
+  }
+
 }
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/UnreferencedTabletFile.java b/core/src/main/java/org/apache/accumulo/core/metadata/UnreferencedTabletFile.java
new file mode 100644
index 0000000000..fe7ec5b00a
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/UnreferencedTabletFile.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Objects;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * A file that is not intended to be added to a tablet as a reference, within the scope of the code
+ * using this class, but needs to be passed to code that processes tablet files. These files could
+ * be temp files or files directly created by a user for bulk import. The file may ultimately be
+ * added to a tablet later as a new file reference, but within a different scope (process, thread,
+ * code block, method, etc.) that uses a different class to represent the file in that scope.
+ *
+ * Unlike {@link TabletFile}, this class does not perform any validation or normalization on the
+ * provided path.
+ *
+ * @since 3.0.0
+ */
+public class UnreferencedTabletFile extends AbstractTabletFile<UnreferencedTabletFile> {
+
+  public UnreferencedTabletFile(FileSystem fs, Path path) {
+    super(Objects.requireNonNull(fs).makeQualified(Objects.requireNonNull(path)));
+  }
+
+  @Override
+  public int compareTo(UnreferencedTabletFile o) {
+    if (equals(o)) {
+      return 0;
+    } else {
+      return path.compareTo(o.path);
+    }
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof UnreferencedTabletFile) {
+      UnreferencedTabletFile that = (UnreferencedTabletFile) obj;
+      return path.equals(that.path);
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return path.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return path.toString();
+  }
+
+  public static UnreferencedTabletFile of(FileSystem fs, File file) {
+    return new UnreferencedTabletFile(fs, new Path(Objects.requireNonNull(file).toString()));
+  }
+
+  public static UnreferencedTabletFile of(FileSystem fs, Path path) {
+    return new UnreferencedTabletFile(fs, path);
+  }
+
+  public static UnreferencedTabletFile of(Configuration conf, Path path) throws IOException {
+    return new UnreferencedTabletFile(Objects.requireNonNull(path).getFileSystem(conf), path);
+  }
+
+}
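
The three of(...) factory overloads added above mirror the call sites elsewhere in this diff. As a hedged illustration only (the file names and local filesystem below are invented for the example, not taken from the commit):

    // Editor's sketch only -- not part of the commit.
    import java.io.File;
    import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class UnreferencedTabletFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        // From a FileSystem and a Path; the path is qualified against fs.
        UnreferencedTabletFile a =
            UnreferencedTabletFile.of(fs, new Path("/tmp/bulk/F0000001.rf"));

        // From a java.io.File, e.g. a temp file created by a test.
        UnreferencedTabletFile b =
            UnreferencedTabletFile.of(fs, new File("/tmp/bulk/F0000002.rf"));

        // From a Configuration; the FileSystem is resolved from the path.
        UnreferencedTabletFile c =
            UnreferencedTabletFile.of(conf, new Path("file:///tmp/bulk/F0000003.rf"));

        System.out.println(a.getFileName() + " " + b.getFileName() + " " + c.getFileName());
      }
    }
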
diff --git a/core/src/test/java/org/apache/accumulo/core/client/rfile/RFileClientTest.java b/core/src/test/java/org/apache/accumulo/core/client/rfile/RFileClientTest.java
index ec07de4ac0..378e036b51 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/rfile/RFileClientTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/rfile/RFileClientTest.java
@@ -62,6 +62,7 @@ import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.rfile.RFile.Reader;
 import org.apache.accumulo.core.iterators.user.RegExFilter;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -810,9 +811,11 @@ public class RFileClientTest {
     }
   }
 
+  @SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path is set by test, not user")
   private Reader getReader(LocalFileSystem localFs, String testFile) throws IOException {
     return (Reader) FileOperations.getInstance().newReaderBuilder()
-        .forFile(testFile, localFs, localFs.getConf(), NoCryptoServiceFactory.NONE)
+        .forFile(UnreferencedTabletFile.of(localFs, new File(testFile)), localFs, localFs.getConf(),
+            NoCryptoServiceFactory.NONE)
         .withTableConfiguration(DefaultConfiguration.getInstance()).build();
   }
 
diff --git a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
index 1d0b5ef539..89384ec21e 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/BloomFilterLayerLookupTest.java
@@ -37,9 +37,11 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
@@ -81,8 +83,9 @@ public class BloomFilterLayerLookupTest extends WithTestNames {
     String suffix = FileOperations.getNewFileExtension(acuconf);
     String fname = new File(tempDir, testName() + "." + suffix).getAbsolutePath();
     FileSKVWriter bmfw = FileOperations.getInstance().newWriterBuilder()
-        .forFile(fname, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(acuconf)
-        .build();
+        .forFile(UnreferencedTabletFile.of(fs, new Path(fname)), fs, conf,
+            NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(acuconf).build();
 
     // write data to file
     long t1 = System.currentTimeMillis();
@@ -99,8 +102,9 @@ public class BloomFilterLayerLookupTest extends WithTestNames {
 
     t1 = System.currentTimeMillis();
     FileSKVIterator bmfr = FileOperations.getInstance().newReaderBuilder()
-        .forFile(fname, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(acuconf)
-        .build();
+        .forFile(UnreferencedTabletFile.of(fs, new Path(fname)), fs, conf,
+            NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(acuconf).build();
     t2 = System.currentTimeMillis();
     log.debug("Opened {} in {}", fname, (t2 - t1));
 
diff --git a/core/src/test/java/org/apache/accumulo/core/file/FileOperationsTest.java b/core/src/test/java/org/apache/accumulo/core/file/FileOperationsTest.java
index b4df990c2c..b85004a900 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/FileOperationsTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/FileOperationsTest.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -52,9 +53,9 @@ public class FileOperationsTest {
       Configuration conf = new Configuration();
       FileSystem fs = FileSystem.getLocal(conf);
       AccumuloConfiguration acuconf = DefaultConfiguration.getInstance();
-      writer =
-          fileOperations.newWriterBuilder().forFile(filename, fs, conf, NoCryptoServiceFactory.NONE)
-              .withTableConfiguration(acuconf).build();
+      writer = fileOperations.newWriterBuilder()
+          .forFile(UnreferencedTabletFile.of(fs, testFile), fs, conf, NoCryptoServiceFactory.NONE)
+          .withTableConfiguration(acuconf).build();
       writer.close();
     } catch (Exception ex) {
       caughtException = true;
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
index bcebf005bd..5bff999211 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapred/AccumuloFileOutputFormatIT.java
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
@@ -197,8 +198,9 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
 
       Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
+      FileSystem fileSystem = FileSystem.getLocal(conf);
       FileSKVIterator sample = FileOperations.getInstance().newReaderBuilder()
-          .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
+          .forFile(UnreferencedTabletFile.of(fileSystem, files[0]), fileSystem, conf,
               NoCryptoServiceFactory.NONE)
           .withTableConfiguration(acuconf).build()
           .getSample(new SamplerConfigurationImpl(SAMPLER_CONFIG));
diff --git a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
index 287f41550b..43e1839da3 100644
--- a/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
+++ b/hadoop-mapreduce/src/test/java/org/apache/accumulo/hadoop/its/mapreduce/AccumuloFileOutputFormatIT.java
@@ -37,6 +37,7 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
@@ -212,9 +213,10 @@ public class AccumuloFileOutputFormatIT extends AccumuloClusterHarness {
 
       Configuration conf = cluster.getServerContext().getHadoopConf();
       DefaultConfiguration acuconf = DefaultConfiguration.getInstance();
+      FileSystem fs = FileSystem.getLocal(conf);
       FileSKVIterator sample = RFileOperations.getInstance().newReaderBuilder()
-          .forFile(files[0].toString(), FileSystem.getLocal(conf), conf,
-              NoCryptoServiceFactory.NONE)
+          .forFile(UnreferencedTabletFile.of(fs, new Path(files[0].toString())),
+              FileSystem.getLocal(conf), conf, NoCryptoServiceFactory.NONE)
           .withTableConfiguration(acuconf).build()
           .getSample(new SamplerConfigurationImpl(SAMPLER_CONFIG));
       assertNotNull(sample);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java b/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
index 2d4b1b7fd3..6b43e28c39 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
@@ -231,9 +231,9 @@ public class FileCompactor implements Callable<CompactionStats> {
           && ((isMinC && acuTableConf.getBoolean(Property.TABLE_MINC_OUTPUT_DROP_CACHE))
               || (!isMinC && acuTableConf.getBoolean(Property.TABLE_MAJC_OUTPUT_DROP_CACHE)));
 
-      WriterBuilder outBuilder = fileFactory.newWriterBuilder()
-          .forFile(outputFile.getMetaInsert(), ns, ns.getConf(), cryptoService)
-          .withTableConfiguration(acuTableConf).withRateLimiter(env.getWriteLimiter());
+      WriterBuilder outBuilder =
+          fileFactory.newWriterBuilder().forFile(outputFile, ns, ns.getConf(), cryptoService)
+              .withTableConfiguration(acuTableConf).withRateLimiter(env.getWriteLimiter());
       if (dropCacheBehindOutput) {
         outBuilder.dropCachesBehind();
       }
@@ -337,8 +337,7 @@ public class FileCompactor implements Callable<CompactionStats> {
         FileSystem fs = this.fs.getFileSystemByPath(dataFile.getPath());
         FileSKVIterator reader;
 
-        reader = fileFactory.newReaderBuilder()
-            .forFile(dataFile.getPathStr(), fs, fs.getConf(), cryptoService)
+        reader = fileFactory.newReaderBuilder().forFile(dataFile, fs, fs.getConf(), cryptoService)
             .withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter())
             .dropCachesBehind().build();
 
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java b/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
index 1ec2918b5b..7edaf97a1a 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
@@ -56,7 +56,6 @@ import org.apache.accumulo.server.problems.ProblemReportingIterator;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -71,10 +70,10 @@ public class FileManager {
   private static class OpenReader implements Comparable<OpenReader> {
     long releaseTime;
     FileSKVIterator reader;
-    String fileName;
+    TabletFile file;
 
-    public OpenReader(String fileName, FileSKVIterator reader) {
-      this.fileName = fileName;
+    public OpenReader(TabletFile file, FileSKVIterator reader) {
+      this.file = file;
       this.reader = reader;
       this.releaseTime = System.currentTimeMillis();
     }
@@ -94,12 +93,12 @@ public class FileManager {
 
     @Override
     public int hashCode() {
-      return fileName.hashCode();
+      return file.hashCode();
     }
   }
 
-  private Map<String,List<OpenReader>> openFiles;
-  private HashMap<FileSKVIterator,String> reservedReaders;
+  private Map<TabletFile,List<OpenReader>> openFiles;
+  private HashMap<FileSKVIterator,TabletFile> reservedReaders;
 
   private Semaphore filePermits;
 
@@ -122,9 +121,9 @@ public class FileManager {
       // determine which files to close in a sync block, and then close the
       // files outside of the sync block
       synchronized (FileManager.this) {
-        Iterator<Entry<String,List<OpenReader>>> iter = openFiles.entrySet().iterator();
+        Iterator<Entry<TabletFile,List<OpenReader>>> iter = openFiles.entrySet().iterator();
         while (iter.hasNext()) {
-          Entry<String,List<OpenReader>> entry = iter.next();
+          Entry<TabletFile,List<OpenReader>> entry = iter.next();
           List<OpenReader> ofl = entry.getValue();
 
           for (Iterator<OpenReader> oflIter = ofl.iterator(); oflIter.hasNext();) {
@@ -173,7 +172,7 @@ public class FileManager {
         this.context.getConfiguration().getTimeInMillis(Property.TSERV_SLOW_FILEPERMIT_MILLIS);
   }
 
-  private static int countReaders(Map<String,List<OpenReader>> files) {
+  private static int countReaders(Map<TabletFile,List<OpenReader>> files) {
     int count = 0;
 
     for (List<OpenReader> list : files.values()) {
@@ -187,7 +186,7 @@ public class FileManager {
 
     ArrayList<OpenReader> openReaders = new ArrayList<>();
 
-    for (Entry<String,List<OpenReader>> entry : openFiles.entrySet()) {
+    for (Entry<TabletFile,List<OpenReader>> entry : openFiles.entrySet()) {
       openReaders.addAll(entry.getValue());
     }
 
@@ -198,13 +197,13 @@ public class FileManager {
     for (int i = 0; i < numToTake && i < openReaders.size(); i++) {
       OpenReader or = openReaders.get(i);
 
-      List<OpenReader> ofl = openFiles.get(or.fileName);
+      List<OpenReader> ofl = openFiles.get(or.file);
       if (!ofl.remove(or)) {
         throw new IllegalStateException("Failed to remove open reader that should have been there");
       }
 
       if (ofl.isEmpty()) {
-        openFiles.remove(or.fileName);
+        openFiles.remove(or.file);
       }
 
       ret.add(or.reader);
@@ -223,10 +222,10 @@ public class FileManager {
     }
   }
 
-  private List<String> takeOpenFiles(Collection<String> files,
-      Map<FileSKVIterator,String> readersReserved) {
-    List<String> filesToOpen = Collections.emptyList();
-    for (String file : files) {
+  private List<TabletFile> takeOpenFiles(Collection<TabletFile> files,
+      Map<FileSKVIterator,TabletFile> readersReserved) {
+    List<TabletFile> filesToOpen = Collections.emptyList();
+    for (TabletFile file : files) {
       List<OpenReader> ofl = openFiles.get(file);
       if (ofl != null && !ofl.isEmpty()) {
         OpenReader openReader = ofl.remove(ofl.size() - 1);
@@ -244,8 +243,9 @@ public class FileManager {
     return filesToOpen;
   }
 
-  private Map<FileSKVIterator,String> reserveReaders(KeyExtent tablet, Collection<String> files,
-      boolean continueOnFailure, CacheProvider cacheProvider) throws IOException {
+  private Map<FileSKVIterator,TabletFile> reserveReaders(KeyExtent tablet,
+      Collection<TabletFile> files, boolean continueOnFailure, CacheProvider cacheProvider)
+      throws IOException {
 
     if (!tablet.isMeta() && files.size() >= maxOpen) {
       throw new IllegalArgumentException("requested files exceeds max open");
@@ -255,9 +255,9 @@ public class FileManager {
       return Collections.emptyMap();
     }
 
-    List<String> filesToOpen = null;
+    List<TabletFile> filesToOpen = null;
     List<FileSKVIterator> filesToClose = Collections.emptyList();
-    Map<FileSKVIterator,String> readersReserved = new HashMap<>();
+    Map<FileSKVIterator,TabletFile> readersReserved = new HashMap<>();
 
     if (!tablet.isMeta()) {
       long start = System.currentTimeMillis();
@@ -297,24 +297,20 @@ public class FileManager {
     closeReaders(filesToClose);
 
     // open any files that need to be opened
-    for (String file : filesToOpen) {
+    for (TabletFile file : filesToOpen) {
       try {
-        if (!file.contains(":")) {
-          throw new IllegalArgumentException("Expected uri, got : " + file);
-        }
-        Path path = new Path(file);
-        FileSystem ns = context.getVolumeManager().getFileSystemByPath(path);
+        FileSystem ns = context.getVolumeManager().getFileSystemByPath(file.getPath());
         // log.debug("Opening "+file + " path " + path);
         var tableConf = context.getTableConfiguration(tablet.tableId());
         FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-            .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+            .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
             .withTableConfiguration(tableConf).withCacheProvider(cacheProvider)
             .withFileLenCache(fileLenCache).build();
         readersReserved.put(reader, file);
       } catch (Exception e) {
 
         ProblemReports.getInstance(context)
-            .report(new ProblemReport(tablet.tableId(), ProblemType.FILE_READ, file, e));
+            .report(new ProblemReport(tablet.tableId(), ProblemType.FILE_READ, file.toString(), e));
 
         if (continueOnFailure) {
           // release the permit for the file that failed to open
@@ -366,10 +362,9 @@ public class FileManager {
       }
 
       for (FileSKVIterator reader : readers) {
-        String fileName = reservedReaders.remove(reader);
+        TabletFile file = reservedReaders.remove(reader);
         if (!sawIOException) {
-          openFiles.computeIfAbsent(fileName, k -> new ArrayList<>())
-              .add(new OpenReader(fileName, reader));
+          openFiles.computeIfAbsent(file, k -> new ArrayList<>()).add(new OpenReader(file, reader));
         }
       }
     }
@@ -391,10 +386,10 @@ public class FileManager {
     private ArrayList<FileDataSource> deepCopies;
     private boolean current = true;
     private IteratorEnvironment env;
-    private String file;
+    private TabletFile file;
     private AtomicBoolean iflag;
 
-    FileDataSource(String file, SortedKeyValueIterator<Key,Value> iter) {
+    FileDataSource(TabletFile file, SortedKeyValueIterator<Key,Value> iter) {
       this.file = file;
       this.iter = iter;
       this.deepCopies = new ArrayList<>();
@@ -481,7 +476,7 @@ public class FileManager {
       }
     }
 
-    private Map<FileSKVIterator,String> openFiles(List<String> files)
+    private Map<FileSKVIterator,TabletFile> openFiles(List<TabletFile> files)
         throws TooManyFilesException, IOException {
       // one tablet can not open more than maxOpen files, otherwise it could get stuck
       // forever waiting on itself to release files
@@ -493,7 +488,7 @@ public class FileManager {
                 + maxOpen + " tablet = " + tablet);
       }
 
-      Map<FileSKVIterator,String> newlyReservedReaders =
+      Map<FileSKVIterator,TabletFile> newlyReservedReaders =
           reserveReaders(tablet, files, continueOnFailure, cacheProvider);
 
       tabletReservedReaders.addAll(newlyReservedReaders.keySet());
@@ -503,17 +498,17 @@ public class FileManager {
     public synchronized List<InterruptibleIterator> openFiles(Map<TabletFile,DataFileValue> files,
         boolean detachable, SamplerConfigurationImpl samplerConfig) throws IOException {
 
-      Map<FileSKVIterator,String> newlyReservedReaders = openFiles(
-          files.keySet().stream().map(TabletFile::getPathStr).collect(Collectors.toList()));
+      Map<FileSKVIterator,TabletFile> newlyReservedReaders =
+          openFiles(new ArrayList<>(files.keySet()));
 
       ArrayList<InterruptibleIterator> iters = new ArrayList<>();
 
       boolean someIteratorsWillWrap =
           files.values().stream().anyMatch(DataFileValue::willWrapIterator);
 
-      for (Entry<FileSKVIterator,String> entry : newlyReservedReaders.entrySet()) {
+      for (Entry<FileSKVIterator,TabletFile> entry : newlyReservedReaders.entrySet()) {
         FileSKVIterator source = entry.getKey();
-        String filename = entry.getValue();
+        TabletFile file = entry.getValue();
         InterruptibleIterator iter;
 
         if (samplerConfig != null) {
@@ -523,12 +518,12 @@ public class FileManager {
           }
         }
 
-        iter = new ProblemReportingIterator(context, tablet.tableId(), filename, continueOnFailure,
-            detachable ? getSsi(filename, source) : source);
+        iter = new ProblemReportingIterator(context, tablet.tableId(), file.toString(),
+            continueOnFailure, detachable ? getSsi(file, source) : source);
 
         if (someIteratorsWillWrap) {
           // constructing FileRef is expensive so avoid if not needed
-          DataFileValue value = files.get(new TabletFile(new Path(filename)));
+          DataFileValue value = files.get(file);
           iter = value.wrapFileIterator(iter);
         }
 
@@ -538,8 +533,8 @@ public class FileManager {
       return iters;
     }
 
-    private SourceSwitchingIterator getSsi(String filename, FileSKVIterator source) {
-      FileDataSource fds = new FileDataSource(filename, source);
+    private SourceSwitchingIterator getSsi(TabletFile file, FileSKVIterator source) {
+      FileDataSource fds = new FileDataSource(file, source);
       dataSources.add(fds);
       return new SourceSwitchingIterator(fds);
     }
@@ -559,11 +554,11 @@ public class FileManager {
         throw new IllegalStateException();
       }
 
-      List<String> files = dataSources.stream().map(x -> x.file).collect(Collectors.toList());
-      Map<FileSKVIterator,String> newlyReservedReaders = openFiles(files);
-      Map<String,List<FileSKVIterator>> map = new HashMap<>();
+      List<TabletFile> files = dataSources.stream().map(x -> x.file).collect(Collectors.toList());
+      Map<FileSKVIterator,TabletFile> newlyReservedReaders = openFiles(files);
+      Map<TabletFile,List<FileSKVIterator>> map = new HashMap<>();
       newlyReservedReaders.forEach(
-          (reader, fileName) -> map.computeIfAbsent(fileName, k -> new LinkedList<>()).add(reader));
+          (reader, file) -> map.computeIfAbsent(file, k -> new LinkedList<>()).add(reader));
 
       for (FileDataSource fds : dataSources) {
         FileSKVIterator source = map.get(fds.file).remove(0);
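
The FileManager hunks above swap every String key for a TabletFile, so the open-file cache and reservation maps now depend on TabletFile providing path-based equals() and hashCode(); the OpenReader.hashCode() change and the direct files.get(file) lookup both rely on that. A tiny sketch of the keying pattern under that assumption, with an illustrative URI:

    // Both instances wrap the same URI, so they are expected to land on one map entry.
    Map<TabletFile,Integer> openCounts = new HashMap<>();
    TabletFile a = TabletFile.of(new Path("hdfs://nn:8020/accumulo/tables/2/t-0002/F0000b.rf"));
    TabletFile b = TabletFile.of(new Path("hdfs://nn:8020/accumulo/tables/2/t-0002/F0000b.rf"));
    openCounts.merge(a, 1, Integer::sum);
    openCounts.merge(b, 1, Integer::sum);
    // openCounts.get(a) == 2 only if TabletFile equality is path based,
    // which the changes above assume.
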
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
index daed8281b8..60cca559e7 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
@@ -42,6 +42,7 @@ import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.TabletFile;
 import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.metadata.schema.MetadataTime;
@@ -164,12 +165,13 @@ class FileSystemInitializer {
     for (Tablet tablet : tablets) {
       createEntriesForTablet(sorted, tablet);
     }
-    FileSystem fs = volmanager.getFileSystemByPath(new Path(fileName));
+    TabletFile file = TabletFile.of(new Path(fileName));
+    FileSystem fs = volmanager.getFileSystemByPath(file.getPath());
 
     CryptoService cs = CryptoFactoryLoader.getServiceForServer(conf);
 
     FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder()
-        .forFile(fileName, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
+        .forFile(file, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
     tabletWriter.startDefaultLocalityGroup();
 
     for (Map.Entry<Key,Value> entry : sorted.entrySet()) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index ba24668948..3f16286df6 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -44,7 +44,9 @@ import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.file.rfile.RFileOperations;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iteratorsImpl.system.MultiIterator;
+import org.apache.accumulo.core.metadata.AbstractTabletFile;
 import org.apache.accumulo.core.metadata.TabletFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.server.ServerContext;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
@@ -110,35 +112,37 @@ public class FileUtil {
     return result;
   }
 
-  public static Collection<String> reduceFiles(ServerContext context, TableConfiguration tableConf,
-      Text prevEndRow, Text endRow, Collection<String> dataFiles, int maxFiles, Path tmpDir,
-      int pass) throws IOException {
+  public static Collection<AbstractTabletFile<?>> reduceFiles(ServerContext context,
+      TableConfiguration tableConf, Text prevEndRow, Text endRow,
+      Collection<? extends AbstractTabletFile<?>> dataFiles, int maxFiles, Path tmpDir, int pass)
+      throws IOException {
 
-    ArrayList<String> paths = new ArrayList<>(dataFiles);
+    ArrayList<AbstractTabletFile<?>> files = new ArrayList<>(dataFiles);
 
-    if (paths.size() <= maxFiles) {
-      return paths;
+    if (files.size() <= maxFiles) {
+      return files;
     }
 
     String newDir = String.format("%s/pass_%04d", tmpDir, pass);
 
     int start = 0;
 
-    ArrayList<String> outFiles = new ArrayList<>();
+    ArrayList<AbstractTabletFile<?>> outFiles = new ArrayList<>();
 
     int count = 0;
 
-    while (start < paths.size()) {
-      int end = Math.min(maxFiles + start, paths.size());
-      List<String> inFiles = paths.subList(start, end);
+    while (start < files.size()) {
+      int end = Math.min(maxFiles + start, files.size());
+      List<AbstractTabletFile<?>> inFiles = files.subList(start, end);
 
       start = end;
 
       // temporary tablet file does not conform to typical path verified in TabletFile
-      String newDataFile = String.format("%s/%04d.%s", newDir, count++, RFile.EXTENSION);
+      Path newPath = new Path(String.format("%s/%04d.%s", newDir, count++, RFile.EXTENSION));
+      FileSystem ns = context.getVolumeManager().getFileSystemByPath(newPath);
+      UnreferencedTabletFile newDataFile = UnreferencedTabletFile.of(ns, newPath);
 
       outFiles.add(newDataFile);
-      FileSystem ns = context.getVolumeManager().getFileSystemByPath(new Path(newDataFile));
       FileSKVWriter writer = new RFileOperations().newWriterBuilder()
           .forFile(newDataFile, ns, ns.getConf(), tableConf.getCryptoService())
           .withTableConfiguration(tableConf).build();
@@ -147,8 +151,8 @@ public class FileUtil {
 
       FileSKVIterator reader = null;
       try {
-        for (String file : inFiles) {
-          ns = context.getVolumeManager().getFileSystemByPath(new Path(file));
+        for (AbstractTabletFile<?> file : inFiles) {
+          ns = context.getVolumeManager().getFileSystemByPath(file.getPath());
           reader = FileOperations.getInstance().newIndexReaderBuilder()
               .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
               .withTableConfiguration(tableConf).build();
@@ -207,8 +211,8 @@ public class FileUtil {
   }
 
   public static double estimatePercentageLTE(ServerContext context, TableConfiguration tableConf,
-      String tabletDir, Text prevEndRow, Text endRow, Collection<String> dataFiles, Text splitRow)
-      throws IOException {
+      String tabletDir, Text prevEndRow, Text endRow,
+      Collection<? extends AbstractTabletFile<?>> dataFiles, Text splitRow) throws IOException {
 
     Path tmpDir = null;
 
@@ -285,9 +289,10 @@ public class FileUtil {
    */
   public static SortedMap<Double,Key> findMidPoint(ServerContext context,
       TableConfiguration tableConf, String tabletDirectory, Text prevEndRow, Text endRow,
-      Collection<String> dataFiles, double minSplit, boolean useIndex) throws IOException {
+      Collection<? extends AbstractTabletFile<?>> dataFiles, double minSplit, boolean useIndex)
+      throws IOException {
 
-    Collection<String> origDataFiles = dataFiles;
+    Collection<? extends AbstractTabletFile<?>> origDataFiles = dataFiles;
 
     Path tmpDir = null;
 
@@ -425,23 +430,22 @@ public class FileUtil {
   }
 
   private static long countIndexEntries(ServerContext context, TableConfiguration tableConf,
-      Text prevEndRow, Text endRow, Collection<String> dataFiles, boolean useIndex,
-      ArrayList<FileSKVIterator> readers) throws IOException {
+      Text prevEndRow, Text endRow, Collection<? extends AbstractTabletFile<?>> dataFiles,
+      boolean useIndex, ArrayList<FileSKVIterator> readers) throws IOException {
     long numKeys = 0;
 
     // count the total number of index entries
-    for (String file : dataFiles) {
+    for (AbstractTabletFile<?> file : dataFiles) {
       FileSKVIterator reader = null;
-      Path path = new Path(file);
-      FileSystem ns = context.getVolumeManager().getFileSystemByPath(path);
+      FileSystem ns = context.getVolumeManager().getFileSystemByPath(file.getPath());
       try {
         if (useIndex) {
           reader = FileOperations.getInstance().newIndexReaderBuilder()
-              .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+              .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
               .withTableConfiguration(tableConf).build();
         } else {
           reader = FileOperations.getInstance().newScanReaderBuilder()
-              .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+              .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
               .withTableConfiguration(tableConf)
               .overRange(new Range(prevEndRow, false, null, true), Set.of(), false).build();
         }
@@ -468,11 +472,11 @@ public class FileUtil {
 
       if (useIndex) {
         readers.add(FileOperations.getInstance().newIndexReaderBuilder()
-            .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+            .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
             .withTableConfiguration(tableConf).build());
       } else {
         readers.add(FileOperations.getInstance().newScanReaderBuilder()
-            .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+            .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
             .withTableConfiguration(tableConf)
             .overRange(new Range(prevEndRow, false, null, true), Set.of(), false).build());
       }
@@ -494,7 +498,7 @@ public class FileUtil {
       FileSystem ns = context.getVolumeManager().getFileSystemByPath(dataFile.getPath());
       try {
         reader = FileOperations.getInstance().newReaderBuilder()
-            .forFile(dataFile.getPathStr(), ns, ns.getConf(), tableConf.getCryptoService())
+            .forFile(dataFile, ns, ns.getConf(), tableConf.getCryptoService())
             .withTableConfiguration(tableConf).build();
 
         Key firstKey = reader.getFirstKey();
@@ -532,7 +536,7 @@ public class FileUtil {
     for (TabletFile file : dataFiles) {
       FileSystem ns = context.getVolumeManager().getFileSystemByPath(file.getPath());
       FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-          .forFile(file.getPathStr(), ns, ns.getConf(), tableConf.getCryptoService())
+          .forFile(file, ns, ns.getConf(), tableConf.getCryptoService())
           .withTableConfiguration(tableConf).seekToBeginning().build();
 
       try {
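
The FileUtil changes converge on one pattern for temporary split-computation files: build the Path, resolve its FileSystem, wrap it as an UnreferencedTabletFile (it lives outside any tablet directory), and hand that object to the builder. A condensed sketch of that pattern; tmpDir, pass, count, context, and tableConf stand in for the method's locals:

    // Temporary pass file is not referenced by any tablet, hence UnreferencedTabletFile.
    Path newPath =
        new Path(String.format("%s/pass_%04d/%04d.%s", tmpDir, pass, count, RFile.EXTENSION));
    FileSystem ns = context.getVolumeManager().getFileSystemByPath(newPath);
    UnreferencedTabletFile tmpFile = UnreferencedTabletFile.of(ns, newPath);
    FileSKVWriter writer = new RFileOperations().newWriterBuilder()
        .forFile(tmpFile, ns, ns.getConf(), tableConf.getCryptoService())
        .withTableConfiguration(tableConf).build();
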
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
index 5a4fc0393f..2479c4a791 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/InMemoryMap.java
@@ -63,6 +63,7 @@ import org.apache.accumulo.core.iteratorsImpl.system.LocalityGroupIterator.Local
 import org.apache.accumulo.core.iteratorsImpl.system.SortedMapIterator;
 import org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator;
 import org.apache.accumulo.core.iteratorsImpl.system.SourceSwitchingIterator.DataSource;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
 import org.apache.accumulo.core.sample.impl.SamplerFactory;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
@@ -602,7 +603,8 @@ public class InMemoryMap {
 
         TableConfiguration tableConf = context.getTableConfiguration(tableId);
         reader = new RFileOperations().newReaderBuilder()
-            .forFile(memDumpFile, fs, conf, tableConf.getCryptoService())
+            .forFile(UnreferencedTabletFile.of(fs, new Path(memDumpFile)), fs, conf,
+                tableConf.getCryptoService())
             .withTableConfiguration(tableConf).seekToBeginning().build();
         if (iflag != null) {
           reader.setInterruptFlag(iflag);
@@ -783,7 +785,8 @@ public class InMemoryMap {
 
         TableConfiguration tableConf = context.getTableConfiguration(tableId);
         FileSKVWriter out = new RFileOperations().newWriterBuilder()
-            .forFile(tmpFile, fs, newConf, tableConf.getCryptoService())
+            .forFile(UnreferencedTabletFile.of(fs, new Path(tmpFile)), fs, newConf,
+                tableConf.getCryptoService())
             .withTableConfiguration(aconf).build();
 
         InterruptibleIterator iter = map.skvIterator(null);
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
index 263e7fe265..66da7828ff 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/LogSorter.java
@@ -39,6 +39,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.manager.thrift.RecoveryStatus;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.core.util.Pair;
@@ -278,7 +279,7 @@ public class LogSorter {
     }
 
     try (var writer = FileOperations.getInstance().newWriterBuilder()
-        .forFile(fullPath.toString(), fs, fs.getConf(), cryptoService)
+        .forFile(UnreferencedTabletFile.of(fs, fullPath), fs, fs.getConf(), cryptoService)
         .withTableConfiguration(sortedLogConf).build()) {
       writer.startDefaultLocalityGroup();
       for (var entry : keyListMap.entrySet()) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/RecoveryLogsIterator.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/RecoveryLogsIterator.java
index 469a128125..f5b97361d0 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/RecoveryLogsIterator.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/RecoveryLogsIterator.java
@@ -36,6 +36,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.iterators.IteratorAdapter;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.CryptoEnvironment;
 import org.apache.accumulo.core.spi.crypto.CryptoService;
 import org.apache.accumulo.server.ServerContext;
@@ -80,7 +81,7 @@ public class RecoveryLogsIterator
 
     for (Path logDir : recoveryLogDirs) {
       LOG.debug("Opening recovery log dir {}", logDir.getName());
-      SortedSet<Path> logFiles = getFiles(vm, logDir);
+      SortedSet<UnreferencedTabletFile> logFiles = getFiles(vm, logDir);
       var fs = vm.getFileSystemByPath(logDir);
 
       // only check the first key once to prevent extra iterator creation and seeking
@@ -88,9 +89,9 @@ public class RecoveryLogsIterator
         validateFirstKey(context, cryptoService, fs, logFiles, logDir);
       }
 
-      for (Path log : logFiles) {
+      for (UnreferencedTabletFile log : logFiles) {
         FileSKVIterator fileIter = FileOperations.getInstance().newReaderBuilder()
-            .forFile(log.toString(), fs, fs.getConf(), cryptoService)
+            .forFile(log, fs, fs.getConf(), cryptoService)
             .withTableConfiguration(context.getConfiguration()).seekToBeginning().build();
         if (range != null) {
           fileIter.seek(range, Collections.emptySet(), false);
@@ -98,11 +99,13 @@ public class RecoveryLogsIterator
         Iterator<Entry<Key,Value>> scanIter = new IteratorAdapter(fileIter);
 
         if (scanIter.hasNext()) {
-          LOG.debug("Write ahead log {} has data in range {} {}", log.getName(), start, end);
+          LOG.debug("Write ahead log {} has data in range {} {}", log.getPath().getName(), start,
+              end);
           iterators.add(scanIter);
           fileIters.add(fileIter);
         } else {
-          LOG.debug("Write ahead log {} has no data in range {} {}", log.getName(), start, end);
+          LOG.debug("Write ahead log {} has no data in range {} {}", log.getPath().getName(), start,
+              end);
           fileIter.close();
         }
       }
@@ -137,12 +140,14 @@ public class RecoveryLogsIterator
   /**
    * Check for sorting signal files (finished/failed) and get the logs in the provided directory.
    */
-  private SortedSet<Path> getFiles(VolumeManager fs, Path directory) throws IOException {
+  private SortedSet<UnreferencedTabletFile> getFiles(VolumeManager fs, Path directory)
+      throws IOException {
     boolean foundFinish = false;
     // Path::getName compares the last component of each Path value. In this case, the last
     // component should
     // always have the format 'part-r-XXXXX.rf', where XXXXX are one-up values.
-    SortedSet<Path> logFiles = new TreeSet<>(Comparator.comparing(Path::getName));
+    SortedSet<UnreferencedTabletFile> logFiles =
+        new TreeSet<>(Comparator.comparing(tf -> tf.getPath().getName()));
     for (FileStatus child : fs.listStatus(directory)) {
       if (child.getPath().getName().startsWith("_")) {
         continue;
@@ -155,7 +160,8 @@ public class RecoveryLogsIterator
         continue;
       }
       FileSystem ns = fs.getFileSystemByPath(child.getPath());
-      Path fullLogPath = ns.makeQualified(child.getPath());
+      UnreferencedTabletFile fullLogPath =
+          UnreferencedTabletFile.of(ns, ns.makeQualified(child.getPath()));
       logFiles.add(fullLogPath);
     }
     if (!foundFinish) {
@@ -169,9 +175,9 @@ public class RecoveryLogsIterator
    * Check that the first entry in the WAL is OPEN. Only need to do this once.
    */
   private void validateFirstKey(ServerContext context, CryptoService cs, FileSystem fs,
-      SortedSet<Path> logFiles, Path fullLogPath) throws IOException {
+      SortedSet<UnreferencedTabletFile> logFiles, Path fullLogPath) throws IOException {
     try (FileSKVIterator fileIter = FileOperations.getInstance().newReaderBuilder()
-        .forFile(logFiles.first().toString(), fs, fs.getConf(), cs)
+        .forFile(logFiles.first(), fs, fs.getConf(), cs)
         .withTableConfiguration(context.getConfiguration()).seekToBeginning().build()) {
       Iterator<Entry<Key,Value>> iterator = new IteratorAdapter(fileIter);
 
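
The recovery log listing above now yields UnreferencedTabletFile values kept in name order ("part-r-XXXXX.rf") by comparing the last path component. A trimmed sketch of that collection step, omitting the finished/failed marker checks shown in the hunk; fs and directory are the method's VolumeManager and Path parameters:

    SortedSet<UnreferencedTabletFile> logFiles =
        new TreeSet<>(Comparator.comparing(tf -> tf.getPath().getName()));
    for (FileStatus child : fs.listStatus(directory)) {
      FileSystem ns = fs.getFileSystemByPath(child.getPath());
      logFiles.add(UnreferencedTabletFile.of(ns, ns.makeQualified(child.getPath())));
    }
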
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
index 9a8fbf4cef..d79a4b81c0 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
@@ -98,7 +98,7 @@ public class CompactableUtils {
     for (StoredTabletFile file : allFiles) {
       FileSystem ns = fs.getFileSystemByPath(file.getPath());
       try (FileSKVIterator openReader =
-          fileFactory.newReaderBuilder().forFile(file.getPathStr(), ns, ns.getConf(), cs)
+          fileFactory.newReaderBuilder().forFile(file, ns, ns.getConf(), cs)
               .withTableConfiguration(tableConf).seekToBeginning().build()) {
         Key first = openReader.getFirstKey();
         Key last = openReader.getLastKey();
@@ -283,7 +283,7 @@ public class CompactableUtils {
           FileSystem ns = tablet.getTabletServer().getVolumeManager().getFileSystemByPath(path);
           var tableConf = tablet.getTableConfiguration();
           var fiter = fileFactory.newReaderBuilder()
-              .forFile(path.toString(), ns, ns.getConf(), tableConf.getCryptoService())
+              .forFile(TabletFile.of(path), ns, ns.getConf(), tableConf.getCryptoService())
               .withTableConfiguration(tableConf).seekToBeginning().build();
           return Optional.ofNullable(fiter.getSample(new SamplerConfigurationImpl(sc)));
         } catch (IOException e) {
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 82cd9d3ca7..e82418264a 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -1376,9 +1376,8 @@ public class Tablet extends TabletBase {
     // Only want one thread doing this computation at time for a tablet.
     if (splitComputationLock.tryLock()) {
       try {
-        SortedMap<Double,Key> midpoint =
-            FileUtil.findMidPoint(context, tableConfiguration, chooseTabletDir(),
-                extent.prevEndRow(), extent.endRow(), FileUtil.toPathStrings(files), .25, true);
+        SortedMap<Double,Key> midpoint = FileUtil.findMidPoint(context, tableConfiguration,
+            chooseTabletDir(), extent.prevEndRow(), extent.endRow(), files, .25, true);
 
         Text lastRow = null;
 
@@ -1528,9 +1527,8 @@ public class Tablet extends TabletBase {
         splitPoint = findSplitRow(splitComputations);
       } else {
         Text tsp = new Text(sp);
-        var fileStrings = FileUtil.toPathStrings(getDatafileManager().getFiles());
         var ratio = FileUtil.estimatePercentageLTE(context, tableConfiguration, chooseTabletDir(),
-            extent.prevEndRow(), extent.endRow(), fileStrings, tsp);
+            extent.prevEndRow(), extent.endRow(), getDatafileManager().getFiles(), tsp);
         splitPoint = new SplitRowSpec(ratio, tsp);
       }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java b/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
index b8454769b3..4efdc52d22 100644
--- a/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
@@ -44,6 +44,7 @@ import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.RFile;
 import org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
@@ -132,8 +133,10 @@ public class CountNameNodeOpsBulkIT extends ConfigurableMacBase {
           fs.mkdirs(files);
           for (int i1 = 0; i1 < 100; i1++) {
             FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-                .forFile(files + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf(),
-                    NoCryptoServiceFactory.NONE)
+                .forFile(
+                    UnreferencedTabletFile.of(fs,
+                        new Path(files + "/bulk_" + i1 + "." + RFile.EXTENSION)),
+                    fs, fs.getConf(), NoCryptoServiceFactory.NONE)
                 .withTableConfiguration(DefaultConfiguration.getInstance()).build();
             writer.startDefaultLocalityGroup();
             for (int j = 0x100; j < 0xfff; j += 3) {
diff --git a/test/src/main/java/org/apache/accumulo/test/GenerateSequentialRFile.java b/test/src/main/java/org/apache/accumulo/test/GenerateSequentialRFile.java
index e3769f7b73..b63f555762 100644
--- a/test/src/main/java/org/apache/accumulo/test/GenerateSequentialRFile.java
+++ b/test/src/main/java/org/apache/accumulo/test/GenerateSequentialRFile.java
@@ -24,6 +24,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -58,10 +59,10 @@ public class GenerateSequentialRFile implements Runnable {
   public void run() {
     try {
       final Configuration conf = new Configuration();
-      Path p = new Path(opts.filePath);
-      final FileSystem fs = p.getFileSystem(conf);
+      final FileSystem fs = FileSystem.get(conf);
+      UnreferencedTabletFile file = UnreferencedTabletFile.of(fs, new Path(opts.filePath));
       FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-          .forFile(opts.filePath, fs, conf, NoCryptoServiceFactory.NONE)
+          .forFile(file, fs, conf, NoCryptoServiceFactory.NONE)
           .withTableConfiguration(DefaultConfiguration.getInstance()).build();
 
       writer.startDefaultLocalityGroup();
diff --git a/test/src/main/java/org/apache/accumulo/test/TestIngest.java b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
index fd0d795b1a..99551e411c 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
@@ -49,11 +49,13 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;
@@ -269,8 +271,9 @@ public class TestIngest {
     if (params.outputFile != null) {
       ClientContext cc = (ClientContext) accumuloClient;
       writer = FileOperations.getInstance().newWriterBuilder()
-          .forFile(params.outputFile + "." + RFile.EXTENSION, fs, cc.getHadoopConf(),
-              NoCryptoServiceFactory.NONE)
+          .forFile(
+              UnreferencedTabletFile.of(fs, new Path(params.outputFile + "." + RFile.EXTENSION)),
+              fs, cc.getHadoopConf(), NoCryptoServiceFactory.NONE)
           .withTableConfiguration(DefaultConfiguration.getInstance()).build();
       writer.startDefaultLocalityGroup();
     } else {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
index 5a9fcf5758..0993e45863 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
@@ -61,6 +61,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.metadata.schema.TabletMetadata;
 import org.apache.accumulo.core.metadata.schema.TabletsMetadata;
 import org.apache.accumulo.core.security.Authorizations;
@@ -590,7 +591,8 @@ public class BulkNewIT extends SharedMiniClusterBase {
     FileSystem fs = getCluster().getFileSystem();
     String filename = file + RFile.EXTENSION;
     try (FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
-        .forFile(filename, fs, fs.getConf(), NoCryptoServiceFactory.NONE)
+        .forFile(UnreferencedTabletFile.of(fs, new Path(filename)), fs, fs.getConf(),
+            NoCryptoServiceFactory.NONE)
         .withTableConfiguration(aconf).build()) {
       writer.startDefaultLocalityGroup();
       for (int i = s; i <= e; i++) {
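
The test changes in BulkNewIT, TestIngest, and CountNameNodeOpsBulkIT all wrap the output path in an UnreferencedTabletFile before handing it to the writer builder. A self-contained sketch of that test-side pattern, mirroring the calls above; the class name, output directory, and cell values are made up for illustration:

    import java.io.IOException;

    import org.apache.accumulo.core.conf.DefaultConfiguration;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVWriter;
    import org.apache.accumulo.core.file.rfile.RFile;
    import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
    import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;

    class BulkFileSketch {
      // Writes one small rfile suitable for bulk import; directory and cells are illustrative.
      static void writeOne(String dir) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path path = new Path(dir + "/bulk_0." + RFile.EXTENSION);
        try (FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
            .forFile(UnreferencedTabletFile.of(fs, path), fs, fs.getConf(),
                NoCryptoServiceFactory.NONE)
            .withTableConfiguration(DefaultConfiguration.getInstance()).build()) {
          writer.startDefaultLocalityGroup();
          writer.append(new Key(new Text("row1"), new Text("cf"), new Text("cq")),
              new Value(new byte[0]));
        }
      }
    }
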
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
index d22508299d..7104f17dbc 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
@@ -464,7 +464,7 @@ public class CollectTabletStats {
     for (TabletFile file : files) {
       FileSystem ns = fs.getFileSystemByPath(file.getPath());
       FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
-          .forFile(file.getPathStr(), ns, ns.getConf(), NoCryptoServiceFactory.NONE)
+          .forFile(file, ns, ns.getConf(), NoCryptoServiceFactory.NONE)
           .withTableConfiguration(aconf).build();
       Range range = new Range(ke.prevEndRow(), false, ke.endRow(), true);
       reader.seek(range, columnSet, !columnSet.isEmpty());
@@ -497,7 +497,7 @@ public class CollectTabletStats {
     for (TabletFile file : files) {
       FileSystem ns = fs.getFileSystemByPath(file.getPath());
       readers.add(FileOperations.getInstance().newReaderBuilder()
-          .forFile(file.getPathStr(), ns, ns.getConf(), NoCryptoServiceFactory.NONE)
+          .forFile(file, ns, ns.getConf(), NoCryptoServiceFactory.NONE)
           .withTableConfiguration(context.getConfiguration()).build());
     }
 
diff --git a/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
index 95769b9d89..f1348d4d9e 100644
--- a/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/shell/ShellServerIT.java
@@ -68,6 +68,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.NamespacePermission;
 import org.apache.accumulo.core.spi.crypto.NoCryptoServiceFactory;
@@ -1313,10 +1314,14 @@ public class ShellServerIT extends SharedMiniClusterBase {
     String odd = new File(importDir, "odd.rf").toString();
     AccumuloConfiguration aconf = DefaultConfiguration.getInstance();
     FileSKVWriter evenWriter = FileOperations.getInstance().newWriterBuilder()
-        .forFile(even, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(aconf).build();
+        .forFile(UnreferencedTabletFile.of(fs, new Path(even)), fs, conf,
+            NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(aconf).build();
     evenWriter.startDefaultLocalityGroup();
     FileSKVWriter oddWriter = FileOperations.getInstance().newWriterBuilder()
-        .forFile(odd, fs, conf, NoCryptoServiceFactory.NONE).withTableConfiguration(aconf).build();
+        .forFile(UnreferencedTabletFile.of(fs, new Path(odd)), fs, conf,
+            NoCryptoServiceFactory.NONE)
+        .withTableConfiguration(aconf).build();
     oddWriter.startDefaultLocalityGroup();
     long timestamp = System.currentTimeMillis();
     Text cf = new Text("cf");