Posted to commits@lucene.apache.org by si...@apache.org on 2019/04/15 20:24:52 UTC

[lucene-solr] branch branch_8x updated: LUCENE-8671: Introduce Reader attributes (#640)

This is an automated email from the ASF dual-hosted git repository.

simonw pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git


The following commit(s) were added to refs/heads/branch_8x by this push:
     new 651f41e  LUCENE-8671: Introduce Reader attributes (#640)
651f41e is described below

commit 651f41e21bd3df98f70d2673295db29506e3d2e6
Author: Simon Willnauer <si...@apache.org>
AuthorDate: Mon Apr 15 20:39:36 2019 +0200

    LUCENE-8671: Introduce Reader attributes (#640)
    
    Reader attributes allow a per-IndexReader configuration of codec internals.
    For instance, this makes it possible to configure per reader whether FSTs are
    loaded into memory or left on disk.
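    
    For illustration, a minimal usage sketch of the new API (the index path and
    the field name "id" are placeholders; java.util, java.nio.file and the Lucene
    imports for BlockTreeTermsReader, DirectoryReader, Directory and FSDirectory
    are assumed):
    
        Map<String, String> attributes = new HashMap<>();
        // reader-wide default: keep all term index FSTs off-heap
        attributes.put(BlockTreeTermsReader.FST_MODE_KEY,
            BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
        // per-field override: keep the FST of the "id" field on-heap
        attributes.put(BlockTreeTermsReader.FST_MODE_KEY + ".id",
            BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name());
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
             DirectoryReader reader = DirectoryReader.open(dir, attributes)) {
          // the codec of every leaf reader sees this map via
          // SegmentReadState.readerAttributes
        }
    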
---
 lucene/CHANGES.txt                                 |  4 ++
 .../codecs/blocktree/BlockTreeTermsReader.java     | 57 +++++++++++++++++++--
 .../lucene/codecs/blocktree/FieldReader.java       |  3 +-
 .../codecs/lucene50/Lucene50PostingsFormat.java    | 53 ++------------------
 .../lucene/codecs/lucene80/Lucene80Codec.java      | 11 ++--
 .../java/org/apache/lucene/index/CheckIndex.java   |  5 +-
 .../apache/lucene/index/DefaultIndexingChain.java  |  3 +-
 .../org/apache/lucene/index/DirectoryReader.java   | 29 ++++++++++-
 .../java/org/apache/lucene/index/IndexWriter.java  |  4 +-
 .../org/apache/lucene/index/IndexWriterConfig.java | 14 ++++++
 .../apache/lucene/index/LiveIndexWriterConfig.java | 13 +++++
 .../java/org/apache/lucene/index/ReaderPool.java   |  8 +--
 .../org/apache/lucene/index/ReadersAndUpdates.java | 17 +++++--
 .../apache/lucene/index/SegmentCoreReaders.java    |  5 +-
 .../org/apache/lucene/index/SegmentDocValues.java  |  3 +-
 .../org/apache/lucene/index/SegmentMerger.java     |  3 +-
 .../org/apache/lucene/index/SegmentReadState.java  | 18 +++++--
 .../org/apache/lucene/index/SegmentReader.java     |  5 +-
 .../lucene/index/StandardDirectoryReader.java      | 23 +++++----
 .../codecs/lucene50/TestBlockPostingsFormat.java   | 58 ++++++++++++++--------
 ...tLucene50StoredFieldsFormatHighCompression.java | 10 ++--
 .../test/org/apache/lucene/index/TestCodecs.java   |  4 +-
 .../lucene/index/TestDemoParallelLeafReader.java   |  2 +-
 .../src/test/org/apache/lucene/index/TestDoc.java  |  6 +--
 .../apache/lucene/index/TestDocumentWriter.java    |  9 ++--
 .../index/TestIndexWriterThreadsToSegments.java    |  3 +-
 .../org/apache/lucene/index/TestReaderPool.java    | 12 ++---
 .../org/apache/lucene/index/TestSegmentMerger.java |  6 +--
 .../org/apache/lucene/index/TestSegmentReader.java |  3 +-
 .../apache/lucene/index/TestSegmentTermDocs.java   |  7 +--
 .../nrt/SegmentInfosSearcherManager.java           |  5 +-
 .../codecs/cheapbastard/CheapBastardCodec.java     |  4 +-
 .../mockrandom/MockRandomPostingsFormat.java       |  4 +-
 .../lucene/index/BaseIndexFileFormatTestCase.java  |  2 +-
 .../java/org/apache/lucene/index/RandomCodec.java  |  4 +-
 .../apache/lucene/index/RandomPostingsTester.java  |  2 +-
 .../util/TestRuleSetupAndRestoreClassEnv.java      |  5 +-
 .../src/java/org/apache/lucene/util/TestUtil.java  |  5 +-
 .../org/apache/solr/core/SchemaCodecFactory.java   |  3 +-
 39 files changed, 270 insertions(+), 162 deletions(-)

diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 26848fb..ddd7e1f 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -108,6 +108,10 @@ Improvements
   JapaneseTokenizer so that the analyzer handles the era name correctly.
   Reiwa is set to replace the Heisei Era on May 1, 2019. (Tomoko Uchida)
 
+* LUCENE-8671: Introduced reader attributes, which allow a per-IndexReader
+  configuration of codec internals. This enables configuring, on a per-field
+  basis, whether FSTs are kept on- or off-heap. (Simon Willnauer)
+
 Changes in Runtime Behavior
 
 * LUCENE-8671: Load FST off-heap also for ID-like fields if reader is not opened
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
index eb341a0..0e9fe6b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
@@ -19,16 +19,17 @@ package org.apache.lucene.codecs.blocktree;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexFileNames;
@@ -75,6 +76,39 @@ import org.apache.lucene.util.fst.Outputs;
 
 public final class BlockTreeTermsReader extends FieldsProducer {
 
+  /**
+   * An enum that controls whether term index FSTs are loaded into memory or read off-heap.
+   */
+  public enum FSTLoadMode {
+    /**
+     * Always read FSTs from disk.
+     * NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
+     * are used.
+     */
+    OFF_HEAP,
+    /**
+     * Never read FSTs from disk, i.e. all fields' FSTs are loaded into memory.
+     */
+    ON_HEAP,
+    /**
+     * Always read FSTs from disk.
+     * An exception is made for ID fields in an IndexWriter context, which are always loaded into memory.
+     * This is useful to guarantee best update performance even if a non-MMapDirectory is used.
+     * NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
+     * are used.
+     * See {@link FSTLoadMode#AUTO}
+     */
+    OPTIMIZE_UPDATES_OFF_HEAP,
+    /**
+     * Automatically decide whether FSTs are read from disk, depending on whether the segment is read from an MMapDirectory.
+     * An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
+     */
+    AUTO
+  }
+
+  /** Attribute key for the FST load mode. */
+  public static final String FST_MODE_KEY = "blocktree.terms.fst";
+
   static final Outputs<BytesRef> FST_OUTPUTS = ByteSequenceOutputs.getSingleton();
   
   static final BytesRef NO_OUTPUT = FST_OUTPUTS.getNoOutput();
@@ -119,7 +153,7 @@ public final class BlockTreeTermsReader extends FieldsProducer {
   final int version;
 
   /** Sole constructor. */
-  public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state, Lucene50PostingsFormat.FSTLoadMode fstLoadMode) throws IOException {
+  public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state, FSTLoadMode defaultLoadMode) throws IOException {
     boolean success = false;
     
     this.postingsReader = postingsReader;
@@ -156,6 +190,7 @@ public final class BlockTreeTermsReader extends FieldsProducer {
       seekDir(termsIn);
       seekDir(indexIn);
 
+      final FSTLoadMode fstLoadMode = getLoadMode(state.readerAttributes, FST_MODE_KEY, defaultLoadMode);
       final int numFields = termsIn.readVInt();
       if (numFields < 0) {
         throw new CorruptIndexException("invalid numFields: " + numFields, termsIn);
@@ -190,10 +225,11 @@ public final class BlockTreeTermsReader extends FieldsProducer {
         if (sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings
           throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, termsIn);
         }
+        final FSTLoadMode perFieldLoadMode = getLoadMode(state.readerAttributes, FST_MODE_KEY + "." + fieldInfo.name, fstLoadMode);
         final long indexStartFP = indexIn.readVLong();
-        FieldReader previous = fields.put(fieldInfo.name,       
+        FieldReader previous = fields.put(fieldInfo.name,
                                           new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount,
-                                                          indexStartFP, longsSize, indexIn, minTerm, maxTerm, state.openedFromWriter, fstLoadMode));
+                                                          indexStartFP, longsSize, indexIn, minTerm, maxTerm, state.openedFromWriter, perFieldLoadMode));
         if (previous != null) {
           throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
         }
@@ -207,6 +243,19 @@ public final class BlockTreeTermsReader extends FieldsProducer {
     }
   }
 
+  private static FSTLoadMode getLoadMode(Map<String, String> attributes, String key, FSTLoadMode defaultValue) {
+    String value = attributes.get(key);
+    if (value == null) {
+      return defaultValue;
+    }
+    try {
+      return FSTLoadMode.valueOf(value);
+    } catch (IllegalArgumentException ex) {
+      throw new IllegalArgumentException("Invalid value for " + key + " expected one of: "
+          + Arrays.toString(FSTLoadMode.values()) + " but was: " + value, ex);
+    }
+  }
+
   private static BytesRef readBytesRef(IndexInput in) throws IOException {
     int numBytes = in.readVInt();
     if (numBytes < 0) {
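
For reference, with the two lookups above the effective load mode for a field
resolves in this order (sketched with a hypothetical field "title"):

    // 1. per-field key:   "blocktree.terms.fst.title"  (FST_MODE_KEY + ".title")
    // 2. reader-wide key:  "blocktree.terms.fst"        (FST_MODE_KEY)
    // 3. the default handed to the constructor by the postings format

An unrecognized value for either key fails fast in getLoadMode with an
IllegalArgumentException that lists the valid FSTLoadMode constants.
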
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
index 94dd1dc..9189b63 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
@@ -20,7 +20,6 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
@@ -67,7 +66,7 @@ public final class FieldReader extends Terms implements Accountable {
   //private boolean DEBUG;
 
   FieldReader(BlockTreeTermsReader parent, FieldInfo fieldInfo, long numTerms, BytesRef rootCode, long sumTotalTermFreq, long sumDocFreq, int docCount,
-              long indexStartFP, int longsSize, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm, boolean openedFromWriter, Lucene50PostingsFormat.FSTLoadMode fstLoadMode) throws IOException {
+              long indexStartFP, int longsSize, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm, boolean openedFromWriter, BlockTreeTermsReader.FSTLoadMode fstLoadMode) throws IOException {
     assert numTerms > 0;
     this.fieldInfo = fieldInfo;
     //DEBUG = BlockTreeTermsReader.DEBUG && fieldInfo.name.equals("id");
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
index 2839233..615909e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
@@ -370,10 +370,7 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
    */
   public static final String PAY_EXTENSION = "pay";
 
-  /** Attribute key for fst mode. */
-  static final String MODE_KEY = Lucene50PostingsFormat.class.getSimpleName() + ".fstMode";
-
-  /** 
+  /**
    * Expert: The maximum number of skip levels. Smaller values result in 
    * slightly smaller indexes, but slower skipping in big posting lists.
    */
@@ -391,37 +388,6 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
 
   private final int minTermBlockSize;
   private final int maxTermBlockSize;
-  private final FSTLoadMode fstLoadMode;
-
-  /**
-   * An enum that allows to control if term index FSTs are loaded into memory or read off-heap
-   */
-  public enum FSTLoadMode {
-    /**
-     * Always read FSTs from disk.
-     * NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
-     * are used.
-     */
-    OFF_HEAP,
-    /**
-     * Never read FSTs from disk ie. all fields FSTs are loaded into memory
-     */
-    ON_HEAP,
-    /**
-     * Always read FSTs from disk.
-     * An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
-     * This is useful to guarantee best update performance even if a non MMapDirectory is used.
-     * NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
-     * are used.
-     * See {@link FSTLoadMode#AUTO}
-     */
-    OPTIMIZE_UPDATES_OFF_HEAP,
-    /**
-     * Automatically make the decision if FSTs are read from disk depending if the segment read from an MMAPDirectory
-     * An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
-     */
-    AUTO
-  }
 
   /**
    * Fixed packed block size, number of integers encoded in 
@@ -429,23 +395,24 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
    */
   // NOTE: must be multiple of 64 because of PackedInts long-aligned encoding/decoding
   public final static int BLOCK_SIZE = 128;
+  private final BlockTreeTermsReader.FSTLoadMode fstLoadMode;
 
   /** Creates {@code Lucene50PostingsFormat} with default
    *  settings. */
   public Lucene50PostingsFormat() {
-    this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, FSTLoadMode.AUTO);
+    this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, BlockTreeTermsReader.FSTLoadMode.AUTO);
   }
 
   /** Creates {@code Lucene50PostingsFormat} with custom
    *  values for {@code minBlockSize} and {@code
    *  maxBlockSize} passed to block terms dictionary.
    *  @see BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int) */
-  public Lucene50PostingsFormat(int minTermBlockSize, int maxTermBlockSize, FSTLoadMode fstLoadMode) {
+  public Lucene50PostingsFormat(int minTermBlockSize, int maxTermBlockSize, BlockTreeTermsReader.FSTLoadMode loadMode) {
     super("Lucene50");
     BlockTreeTermsWriter.validateSettings(minTermBlockSize, maxTermBlockSize);
     this.minTermBlockSize = minTermBlockSize;
     this.maxTermBlockSize = maxTermBlockSize;
-    this.fstLoadMode = fstLoadMode;
+    this.fstLoadMode = loadMode;
   }
 
   @Override
@@ -456,11 +423,6 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
   @Override
   public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
     PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
-    if (state.segmentInfo.getAttribute(MODE_KEY) != null && fstLoadMode.name().equals(state.segmentInfo.getAttribute(MODE_KEY)) == false) {
-      throw new IllegalStateException("found existing value for " + MODE_KEY + " for segment: " + state.segmentInfo.name +
-          " old=" + state.segmentInfo.getAttribute(MODE_KEY) + ", new=" + fstLoadMode.name());
-    }
-    state.segmentInfo.putAttribute(MODE_KEY, fstLoadMode.name());
     boolean success = false;
     try {
       FieldsConsumer ret = new BlockTreeTermsWriter(state, 
@@ -479,11 +441,6 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
   @Override
   public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
     PostingsReaderBase postingsReader = new Lucene50PostingsReader(state);
-    String fstLoadModeKey = state.segmentInfo.getAttribute(MODE_KEY);
-    FSTLoadMode fstLoadMode = FSTLoadMode.AUTO;
-    if (fstLoadModeKey != null) {
-      fstLoadMode = FSTLoadMode.valueOf(fstLoadModeKey);
-    }
     boolean success = false;
     try {
       FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state, fstLoadMode);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene80/Lucene80Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene80/Lucene80Codec.java
index aa55f5f..9abb1eb 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene80/Lucene80Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene80/Lucene80Codec.java
@@ -30,11 +30,9 @@ import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
 import org.apache.lucene.codecs.lucene50.Lucene50CompoundFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50LiveDocsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.FSTLoadMode;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
 import org.apache.lucene.codecs.lucene50.Lucene50TermVectorsFormat;
@@ -83,20 +81,19 @@ public class Lucene80Codec extends Codec {
    * Instantiates a new codec.
    */
   public Lucene80Codec() {
-    this(Mode.BEST_SPEED, FSTLoadMode.AUTO);
+    this(Mode.BEST_SPEED);
   }
   
   /** 
    * Instantiates a new codec, specifying the stored fields compression
    * mode to use.
-   * @param mode stored fields compression mode to use for newly 
+   * @param mode stored fields compression mode to use for newly
    *             flushed/merged segments.
    */
-  public Lucene80Codec(Mode mode, FSTLoadMode fstLoadMode) {
+  public Lucene80Codec(Mode mode) {
     super("Lucene80");
     this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(mode));
-    this.defaultFormat = new Lucene50PostingsFormat(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE,
-        BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, Objects.requireNonNull(fstLoadMode));
+    this.defaultFormat = new Lucene50PostingsFormat();
   }
   
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index fcda464..df61936 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -25,6 +25,7 @@ import java.nio.file.Paths;
 import java.text.NumberFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -38,6 +39,7 @@ import org.apache.lucene.codecs.PointsReader;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
 import org.apache.lucene.index.CheckIndex.Status.DocValuesStatus;
@@ -673,7 +675,8 @@ public final class CheckIndex implements Closeable {
         long startOpenReaderNS = System.nanoTime();
         if (infoStream != null)
           infoStream.print("    test: open reader.........");
-        reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), false, IOContext.DEFAULT);
+        reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), false, IOContext.DEFAULT,
+            Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name())); // let's keep stuff on disk for check-index
         msg(infoStream, String.format(Locale.ROOT, "OK [took %.3f sec]", nsToSec(System.nanoTime()-startOpenReaderNS)));
 
         segInfoStat.openReaderPassed = true;
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index f507157..12bfe07 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -21,6 +21,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -131,7 +132,7 @@ final class DefaultIndexingChain extends DocConsumer {
     if (docState.infoStream.isEnabled("IW")) {
       docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write norms");
     }
-    SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix);
+    SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix, Collections.emptyMap());
     
     t0 = System.nanoTime();
     writeDocValues(state, sortMap);
diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
index 7f65d15..c7d52b4 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -23,6 +23,7 @@ import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.lucene.search.SearcherManager; // javadocs
 import org.apache.lucene.store.Directory;
@@ -60,7 +61,19 @@ public abstract class DirectoryReader extends BaseCompositeReader<LeafReader> {
    * @throws IOException if there is a low-level IO error
    */
   public static DirectoryReader open(final Directory directory) throws IOException {
-    return StandardDirectoryReader.open(directory, null);
+    return open(directory, Collections.emptyMap());
+  }
+
+  /** Returns an IndexReader reading the index in the given
+   *  Directory
+   * @param directory the index directory
+   * @param readerAttributes the reader attributes passed to the {@link org.apache.lucene.codecs.Codec} layer of the
+   *                         directory reader. This attribute map is forwarded to all leaf readers as well as to the readers
+   *                         that are opened subsequently via the different flavors of {@link DirectoryReader#openIfChanged(DirectoryReader)}
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final Directory directory, final Map<String, String> readerAttributes) throws IOException {
+    return StandardDirectoryReader.open(directory, null, readerAttributes);
   }
   
   /**
@@ -109,7 +122,19 @@ public abstract class DirectoryReader extends BaseCompositeReader<LeafReader> {
    * @throws IOException if there is a low-level IO error
    */
   public static DirectoryReader open(final IndexCommit commit) throws IOException {
-    return StandardDirectoryReader.open(commit.getDirectory(), commit);
+    return open(commit, Collections.emptyMap());
+  }
+
+  /** Expert: returns an IndexReader reading the index in the given
+   *  {@link IndexCommit}.
+   * @param commit the commit point to open
+   * @param readerAttributes the reader attributes passed to the {@link org.apache.lucene.codecs.Codec} layer of the
+   *                         directory reader. This attribute map is forwarded to all leaf readers as well as to the readers
+   *                         that are opened subsequently via the different flavors of {@link DirectoryReader#openIfChanged(DirectoryReader)}
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final IndexCommit commit, Map<String, String> readerAttributes) throws IOException {
+    return StandardDirectoryReader.open(commit.getDirectory(), commit, readerAttributes);
   }
 
   /**
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 74405af..3b110ec 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -523,7 +523,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
             // reader; in theory we could instead do similar retry logic,
             // just like we do when loading segments_N
             
-            r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes, writeAllDeletes);
+            r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes, writeAllDeletes, config.getReaderAttributes());
             if (infoStream.isEnabled("IW")) {
               infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
             }
@@ -885,7 +885,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
           enableTestPoints, this::newSegmentName,
           config, directoryOrig, directory, globalFieldNumberMap);
       readerPool = new ReaderPool(directory, directoryOrig, segmentInfos, globalFieldNumberMap,
-          bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader);
+          bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader, config.getReaderAttributes());
       if (config.getReaderPooling()) {
         readerPool.enableReaderPooling();
       }
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
index 2d9b5b1..67d61a6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -19,7 +19,11 @@ package org.apache.lucene.index;
 
 import java.io.PrintStream;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
 import java.util.stream.Collectors;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -543,5 +547,15 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
     this.softDeletesField = softDeletesField;
     return this;
   }
+
+  /**
+   * Sets the reader attributes used for all readers pulled from the IndexWriter. Reader attributes allow configuration
+   * of low-level aspects like RAM utilization on a per-reader basis.
+   * Note: This method makes a shallow copy of the provided map.
+   */
+  public IndexWriterConfig setReaderAttributes(Map<String, String> readerAttributes) {
+    this.readerAttributes = Collections.unmodifiableMap(new HashMap<>(Objects.requireNonNull(readerAttributes)));
+    return this;
+  }
   
 }
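
A minimal sketch of wiring these attributes into an IndexWriter so that NRT
readers inherit them (the analyzer and the directory variable "dir" are
placeholders):

    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer())
        .setReaderAttributes(Collections.singletonMap(
            BlockTreeTermsReader.FST_MODE_KEY,
            BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name()));
    try (IndexWriter writer = new IndexWriter(dir, config);
         DirectoryReader nrtReader = DirectoryReader.open(writer)) {
      // NRT readers pulled from this writer, and readers subsequently
      // reopened from them, carry the configured attributes
    }
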
diff --git a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
index 259a020..fbd72ec 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 
 import java.util.Collections;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -113,6 +114,10 @@ public class LiveIndexWriterConfig {
   /** soft deletes field */
   protected String softDeletesField = null;
 
+  /** the attributes for the NRT readers */
+  protected Map<String, String> readerAttributes = Collections.emptyMap();
+
+
   // used by IndexWriterConfig
   LiveIndexWriterConfig(Analyzer analyzer) {
     this.analyzer = analyzer;
@@ -499,6 +504,14 @@ public class LiveIndexWriterConfig {
     sb.append("indexSort=").append(getIndexSort()).append("\n");
     sb.append("checkPendingFlushOnUpdate=").append(isCheckPendingFlushOnUpdate()).append("\n");
     sb.append("softDeletesField=").append(getSoftDeletesField()).append("\n");
+    sb.append("readerAttributes=").append(getReaderAttributes()).append("\n");
     return sb.toString();
   }
+
+  /**
+   * Returns the reader attributes passed to all published readers opened from or within the IndexWriter.
+   */
+  public Map<String, String> getReaderAttributes() {
+    return this.readerAttributes;
+  }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
index b792be2..859f899 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
@@ -54,6 +54,7 @@ final class ReaderPool implements Closeable {
   private final InfoStream infoStream;
   private final SegmentInfos segmentInfos;
   private final String softDeletesField;
+  private final Map<String, String> readerAttributes;
   // This is a "write once" variable (like the organic dye
   // on a DVD-R that may or may not be heated by a laser and
   // then cooled to permanently record the event): it's
@@ -71,7 +72,7 @@ final class ReaderPool implements Closeable {
 
   ReaderPool(Directory directory, Directory originalDirectory, SegmentInfos segmentInfos,
              FieldInfos.FieldNumbers fieldNumbers, LongSupplier completedDelGenSupplier, InfoStream infoStream,
-             String softDeletesField, StandardDirectoryReader reader) throws IOException {
+             String softDeletesField, StandardDirectoryReader reader, Map<String, String> readerAttributes) throws IOException {
     this.directory = directory;
     this.originalDirectory = originalDirectory;
     this.segmentInfos = segmentInfos;
@@ -79,6 +80,7 @@ final class ReaderPool implements Closeable {
     this.completedDelGenSupplier = completedDelGenSupplier;
     this.infoStream = infoStream;
     this.softDeletesField = softDeletesField;
+    this.readerAttributes = readerAttributes;
     if (reader != null) {
       // Pre-enroll all segment readers into the reader pool; this is necessary so
       // any in-memory NRT live docs are correctly carried over, and so NRT readers
@@ -91,7 +93,7 @@ final class ReaderPool implements Closeable {
         SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(),
             segReader.getHardLiveDocs(), segReader.numDocs(), true);
         readerMap.put(newReader.getOriginalSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(),
-            newReader, newPendingDeletes(newReader, newReader.getOriginalSegmentInfo())));
+            newReader, newPendingDeletes(newReader, newReader.getOriginalSegmentInfo()), readerAttributes));
       }
     }
   }
@@ -372,7 +374,7 @@ final class ReaderPool implements Closeable {
       if (create == false) {
         return null;
       }
-      rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
+      rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info), readerAttributes);
       // Steal initial reference:
       readerMap.put(info, rld);
     } else {
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 0f47ed2..73f7d66 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -36,6 +36,7 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.FieldInfosFormat;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -88,18 +89,22 @@ final class ReadersAndUpdates {
 
   final AtomicLong ramBytesUsed = new AtomicLong();
 
+  private final Map<String, String> readerAttributes;
+
   ReadersAndUpdates(int indexCreatedVersionMajor, SegmentCommitInfo info,
-                    PendingDeletes pendingDeletes) {
+                    PendingDeletes pendingDeletes, Map<String, String> readerAttributes) {
     this.info = info;
     this.pendingDeletes = pendingDeletes;
     this.indexCreatedVersionMajor = indexCreatedVersionMajor;
+    this.readerAttributes = readerAttributes;
   }
 
   /** Init from a previously opened SegmentReader.
    *
    * <p>NOTE: steals incoming ref from reader. */
-  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes) throws IOException {
-    this(indexCreatedVersionMajor, reader.getOriginalSegmentInfo(), pendingDeletes);
+  ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes,
+                    Map<String, String> readerAttributes) throws IOException {
+    this(indexCreatedVersionMajor, reader.getOriginalSegmentInfo(), pendingDeletes, readerAttributes);
     this.reader = reader;
     pendingDeletes.onNewReader(reader, info);
   }
@@ -169,7 +174,7 @@ final class ReadersAndUpdates {
   public synchronized SegmentReader getReader(IOContext context) throws IOException {
     if (reader == null) {
       // We steal returned ref:
-      reader = new SegmentReader(info, indexCreatedVersionMajor, true, context);
+      reader = new SegmentReader(info, indexCreatedVersionMajor, true, context, readerAttributes);
       pendingDeletes.onNewReader(reader, info);
     }
 
@@ -536,7 +541,9 @@ final class ReadersAndUpdates {
       // IndexWriter.commitMergedDeletes).
       final SegmentReader reader;
       if (this.reader == null) {
-        reader = new SegmentReader(info, indexCreatedVersionMajor, true, IOContext.READONCE);
+        reader = new SegmentReader(info, indexCreatedVersionMajor, true, IOContext.READONCE,
+            // we don't need terms - let's leave them off-heap if possible
+            Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()));
         pendingDeletes.onNewReader(reader, info);
       } else {
         reader = this.reader;
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
index 21e6c82..371abc6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.nio.file.NoSuchFileException;
 import java.util.Collections;
 import java.util.LinkedHashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -89,7 +90,7 @@ final class SegmentCoreReaders {
   private final Set<IndexReader.ClosedListener> coreClosedListeners = 
       Collections.synchronizedSet(new LinkedHashSet<IndexReader.ClosedListener>());
   
-  SegmentCoreReaders(Directory dir, SegmentCommitInfo si, boolean openedFromWriter, IOContext context) throws IOException {
+  SegmentCoreReaders(Directory dir, SegmentCommitInfo si, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) throws IOException {
 
     final Codec codec = si.info.getCodec();
     final Directory cfsDir; // confusing name: if (cfs) it's the cfsdir, otherwise it's the segment's directory.
@@ -107,7 +108,7 @@ final class SegmentCoreReaders {
 
       coreFieldInfos = codec.fieldInfosFormat().read(cfsDir, si.info, "", context);
       
-      final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, openedFromWriter, context);
+      final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, openedFromWriter, context, readerAttributes);
       final PostingsFormat format = codec.postingsFormat();
       // Ask codec for its Fields
       fields = format.fieldsProducer(segmentReadState);
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java
index fee0050..6cb1a8a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,7 +47,7 @@ final class SegmentDocValues {
     }
 
     // set SegmentReadState to list only the fields that are relevant to that gen
-    SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, false, IOContext.READ, segmentSuffix);
+    SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, false, IOContext.READ, segmentSuffix, Collections.emptyMap());
     DocValuesFormat dvFormat = si.info.getCodec().docValuesFormat();
     return new RefCount<DocValuesProducer>(dvFormat.fieldsProducer(srs)) {
       @SuppressWarnings("synthetic-access")
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
index a91cea2..c2677c9 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.lucene.codecs.Codec;
@@ -112,7 +113,7 @@ final class SegmentMerger {
     final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
                                                                       mergeState.mergeFieldInfos, null, context);
     final SegmentReadState segmentReadState = new SegmentReadState(directory, mergeState.segmentInfo, mergeState.mergeFieldInfos,
-        true, IOContext.READ, segmentWriteState.segmentSuffix);
+        true, IOContext.READ, segmentWriteState.segmentSuffix, Collections.emptyMap());
 
     if (mergeState.mergeFieldInfos.hasNorms()) {
       if (mergeState.infoStream.isEnabled("SM")) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReadState.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReadState.java
index 0c66762..61041f6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentReadState.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReadState.java
@@ -17,6 +17,10 @@
 package org.apache.lucene.index;
 
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.lucene.codecs.PostingsFormat; // javadocs
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; // javadocs
 import org.apache.lucene.store.Directory;
@@ -54,10 +58,16 @@ public class SegmentReadState {
    */
   public final boolean openedFromWriter;
 
+  /**
+   * The reader attributes for this reader. This is used to configure low-level options on the codec layer.
+   * This attribute map is user-supplied at reader creation time.
+   */
+  public final Map<String, String> readerAttributes;
+
   /** Create a {@code SegmentReadState}. */
   public SegmentReadState(Directory dir, SegmentInfo info,
-                          FieldInfos fieldInfos, boolean openedFromWriter, IOContext context) {
-    this(dir, info, fieldInfos, openedFromWriter, context, "");
+                          FieldInfos fieldInfos, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) {
+    this(dir, info, fieldInfos, openedFromWriter, context, "", readerAttributes);
   }
   
   /** Create a {@code SegmentReadState}. */
@@ -65,13 +75,14 @@ public class SegmentReadState {
                           SegmentInfo info,
                           FieldInfos fieldInfos,
                           boolean openedFromWriter, IOContext context,
-                          String segmentSuffix) {
+                          String segmentSuffix, Map<String, String> readerAttributes) {
     this.directory = dir;
     this.segmentInfo = info;
     this.fieldInfos = fieldInfos;
     this.context = context;
     this.segmentSuffix = segmentSuffix;
     this.openedFromWriter = openedFromWriter;
+    this.readerAttributes = Collections.unmodifiableMap(new HashMap<>(readerAttributes));
   }
 
   /** Create a {@code SegmentReadState}. */
@@ -83,5 +94,6 @@ public class SegmentReadState {
     this.context = other.context;
     this.openedFromWriter = other.openedFromWriter;
     this.segmentSuffix = newSegmentSuffix;
+    this.readerAttributes = other.readerAttributes;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
index c0ae337..d71d384 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 
@@ -72,7 +73,7 @@ public final class SegmentReader extends CodecReader {
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  SegmentReader(SegmentCommitInfo si, int createdVersionMajor, boolean openedFromWriter, IOContext context) throws IOException {
+  SegmentReader(SegmentCommitInfo si, int createdVersionMajor, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) throws IOException {
     this.si = si.clone();
     this.originalSi = si;
     this.metaData = new LeafMetaData(createdVersionMajor, si.info.getMinVersion(), si.info.getIndexSort());
@@ -80,7 +81,7 @@ public final class SegmentReader extends CodecReader {
     // We pull liveDocs/DV updates from disk:
     this.isNRT = false;
     
-    core = new SegmentCoreReaders(si.info.dir, si, openedFromWriter, context);
+    core = new SegmentCoreReaders(si.info.dir, si, openedFromWriter, context, readerAttributes);
     segDocValues = new SegmentDocValues();
     
     boolean success = false;
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 0c3f4d7..80c3635 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -42,19 +42,22 @@ public final class StandardDirectoryReader extends DirectoryReader {
   final SegmentInfos segmentInfos;
   private final boolean applyAllDeletes;
   private final boolean writeAllDeletes;
+  private final Map<String, String> readerAttributes;
   
   /** called only from static open() methods */
   StandardDirectoryReader(Directory directory, LeafReader[] readers, IndexWriter writer,
-                          SegmentInfos sis, boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
+                          SegmentInfos sis, boolean applyAllDeletes, boolean writeAllDeletes,
+                          Map<String, String> readerAttributes) throws IOException {
     super(directory, readers);
     this.writer = writer;
     this.segmentInfos = sis;
     this.applyAllDeletes = applyAllDeletes;
     this.writeAllDeletes = writeAllDeletes;
+    this.readerAttributes = Collections.unmodifiableMap(new HashMap<>(readerAttributes));
   }
 
   /** called from DirectoryReader.open(...) methods */
-  static DirectoryReader open(final Directory directory, final IndexCommit commit) throws IOException {
+  static DirectoryReader open(final Directory directory, final IndexCommit commit, Map<String, String> readerAttributes) throws IOException {
     return new SegmentInfos.FindSegmentsFile<DirectoryReader>(directory) {
       @Override
       protected DirectoryReader doBody(String segmentFileName) throws IOException {
@@ -63,12 +66,12 @@ public final class StandardDirectoryReader extends DirectoryReader {
         boolean success = false;
         try {
           for (int i = sis.size()-1; i >= 0; i--) {
-            readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), false, IOContext.READ);
+            readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), false, IOContext.READ, readerAttributes);
           }
 
           // This may throw CorruptIndexException if there are too many docs, so
           // it must be inside try clause so we close readers in that case:
-          DirectoryReader reader = new StandardDirectoryReader(directory, readers, null, sis, false, false);
+          DirectoryReader reader = new StandardDirectoryReader(directory, readers, null, sis, false, false, readerAttributes);
           success = true;
 
           return reader;
@@ -82,7 +85,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
   }
 
   /** Used by near real-time search */
-  static DirectoryReader open(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
+  static DirectoryReader open(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes, boolean writeAllDeletes, Map<String, String> readerAttributes) throws IOException {
     // IndexWriter synchronizes externally before calling
     // us, which ensures infos will not change; so there's
     // no need to process segments in reverse order
@@ -121,7 +124,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
 
       StandardDirectoryReader result = new StandardDirectoryReader(dir,
           readers.toArray(new SegmentReader[readers.size()]), writer,
-          segmentInfos, applyAllDeletes, writeAllDeletes);
+          segmentInfos, applyAllDeletes, writeAllDeletes, readerAttributes);
       return result;
     } catch (Throwable t) {
       try {
@@ -136,7 +139,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
   /** This constructor is only used for {@link #doOpenIfChanged(SegmentInfos)}, as well as NRT replication.
    *
    *  @lucene.internal */
-  public static DirectoryReader open(Directory directory, SegmentInfos infos, List<? extends LeafReader> oldReaders) throws IOException {
+  public static DirectoryReader open(Directory directory, SegmentInfos infos, List<? extends LeafReader> oldReaders, Map<String, String> readerAttributes) throws IOException {
 
     // we put the old SegmentReaders in a map, that allows us
     // to lookup a reader using its segment name
@@ -176,7 +179,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
         SegmentReader newReader;
         if (oldReader == null || commitInfo.info.getUseCompoundFile() != oldReader.getSegmentInfo().info.getUseCompoundFile()) {
           // this is a new reader; in case we hit an exception we can decRef it safely
-          newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), false, IOContext.READ);
+          newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), false, IOContext.READ, readerAttributes);
           newReaders[i] = newReader;
         } else {
           if (oldReader.isNRT) {
@@ -218,7 +221,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
         }
       }
     }    
-    return new StandardDirectoryReader(directory, newReaders, null, infos, false, false);
+    return new StandardDirectoryReader(directory, newReaders, null, infos, false, false, readerAttributes);
   }
 
   // TODO: move somewhere shared if it's useful elsewhere
@@ -331,7 +334,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
   }
 
   DirectoryReader doOpenIfChanged(SegmentInfos infos) throws IOException {
-    return StandardDirectoryReader.open(directory, infos, getSequentialSubReaders());
+    return StandardDirectoryReader.open(directory, infos, getSequentialSubReaders(), readerAttributes);
   }
 
   @Override
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
index 22b400a..25f06e2 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
@@ -21,14 +21,14 @@ import java.io.IOException;
 import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompetitiveImpactAccumulator;
-import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.asserting.AssertingCodec;
-import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.codecs.blocktree.FieldReader;
 import org.apache.lucene.codecs.blocktree.Stats;
 import org.apache.lucene.codecs.lucene50.Lucene50ScoreSkipReader.MutableImpactList;
@@ -127,17 +127,37 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
         assertFalse(field.isFstOffHeap());
       }
     }
+
+    try (Directory d = new SimpleFSDirectory(tempDir)) {
+      // test per field
+      Map<String, String> readerAttributes = new HashMap<>();
+      readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
+      readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY + ".field", BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name());
+      try (DirectoryReader r = DirectoryReader.open(d, readerAttributes)) {
+        assertEquals(1, r.leaves().size());
+        FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
+        FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
+        assertTrue(id.isFstOffHeap());
+        assertFalse(field.isFstOffHeap());
+      }
+    }
+
+    IllegalArgumentException invalid = expectThrows(IllegalArgumentException.class, () -> {
+      try (Directory d = new SimpleFSDirectory(tempDir)) {
+        Map<String, String> readerAttributes = new HashMap<>();
+        readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, "invalid");
+        DirectoryReader.open(d, readerAttributes);
+      }
+    });
+
+    assertEquals("Invalid value for blocktree.terms.fst expected one of: [OFF_HEAP, ON_HEAP, OPTIMIZE_UPDATES_OFF_HEAP, AUTO] but was: invalid", invalid.getMessage());
   }
 
   public void testDisableFSTOffHeap() throws IOException {
     Path tempDir = createTempDir();
     try (Directory d = MMapDirectory.open(tempDir)) {
-      try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())).setCodec(new AssertingCodec() {
-        @Override
-        public PostingsFormat getPostingsFormatForField(String field) {
-          return new Lucene50PostingsFormat(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, Lucene50PostingsFormat.FSTLoadMode.ON_HEAP);
-        }
-      }))) {
+      try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
+          .setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name())))) {
         assumeTrue("only works with mmap directory", d instanceof MMapDirectory);
         DirectoryReader readerFromWriter = DirectoryReader.open(w);
         for (int i = 0; i < 50; i++) {
@@ -171,7 +191,7 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
         w.forceMerge(1);
         w.commit();
       }
-      try (DirectoryReader r = DirectoryReader.open(d)) {
+      try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name()))) {
         assertEquals(1, r.leaves().size());
         FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
         FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
@@ -183,19 +203,15 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
 
   public void testAlwaysFSTOffHeap() throws IOException {
     boolean alsoLoadIdOffHeap = random().nextBoolean();
-    Lucene50PostingsFormat.FSTLoadMode loadMode;
+    BlockTreeTermsReader.FSTLoadMode loadMode;
     if (alsoLoadIdOffHeap) {
-      loadMode = Lucene50PostingsFormat.FSTLoadMode.OFF_HEAP;
+      loadMode = BlockTreeTermsReader.FSTLoadMode.OFF_HEAP;
     } else {
-      loadMode = Lucene50PostingsFormat.FSTLoadMode.OPTIMIZE_UPDATES_OFF_HEAP;
+      loadMode = BlockTreeTermsReader.FSTLoadMode.OPTIMIZE_UPDATES_OFF_HEAP;
     }
     try (Directory d = newDirectory()) { // any directory should work now
-      try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())).setCodec(new AssertingCodec() {
-        @Override
-        public PostingsFormat getPostingsFormatForField(String field) {
-          return new Lucene50PostingsFormat(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, loadMode);
-        }
-      }))) {
+      try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
+          .setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name())))) {
         DirectoryReader readerFromWriter = DirectoryReader.open(w);
         for (int i = 0; i < 50; i++) {
           Document doc = new Document();
@@ -232,7 +248,7 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
         w.forceMerge(1);
         w.commit();
       }
-      try (DirectoryReader r = DirectoryReader.open(d)) {
+      try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name()))) {
         assertEquals(1, r.leaves().size());
         FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
         FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
@@ -268,7 +284,7 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
 
   private void shouldFail(int minItemsInBlock, int maxItemsInBlock) {
     expectThrows(IllegalArgumentException.class, () -> {
-      new Lucene50PostingsFormat(minItemsInBlock, maxItemsInBlock, Lucene50PostingsFormat.FSTLoadMode.AUTO);
+      new Lucene50PostingsFormat(minItemsInBlock, maxItemsInBlock, BlockTreeTermsReader.FSTLoadMode.AUTO);
     });
   }
 
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
index d5c0ecb..eef2e11 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
@@ -33,7 +33,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 public class TestLucene50StoredFieldsFormatHighCompression extends BaseStoredFieldsFormatTestCase {
   @Override
   protected Codec getCodec() {
-    return new Lucene80Codec(Mode.BEST_COMPRESSION, Lucene50PostingsFormat.FSTLoadMode.AUTO);
+    return new Lucene80Codec(Mode.BEST_COMPRESSION);
   }
   
   /**
@@ -44,7 +44,7 @@ public class TestLucene50StoredFieldsFormatHighCompression extends BaseStoredFie
     Directory dir = newDirectory();
     for (int i = 0; i < 10; i++) {
       IndexWriterConfig iwc = newIndexWriterConfig();
-      iwc.setCodec(new Lucene80Codec(RandomPicks.randomFrom(random(), Mode.values()), Lucene50PostingsFormat.FSTLoadMode.AUTO));
+      iwc.setCodec(new Lucene80Codec(RandomPicks.randomFrom(random(), Mode.values())));
       IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig());
       Document doc = new Document();
       doc.add(new StoredField("field1", "value1"));
@@ -71,14 +71,10 @@ public class TestLucene50StoredFieldsFormatHighCompression extends BaseStoredFie
   
   public void testInvalidOptions() {
     expectThrows(NullPointerException.class, () -> {
-      new Lucene80Codec(null, Lucene50PostingsFormat.FSTLoadMode.AUTO);
+      new Lucene80Codec(null);
     });
 
     expectThrows(NullPointerException.class, () -> {
-      new Lucene80Codec(Mode.BEST_COMPRESSION, null);
-    });
-    
-    expectThrows(NullPointerException.class, () -> {
       new Lucene50StoredFieldsFormat(null);
     });
   }
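
For the constructor change above, a hedged sketch (not part of this patch) of
the resulting configuration: the codec now takes only the stored fields mode,
while the FST load mode travels as a reader attribute instead; the use of
StandardAnalyzer here is an illustrative choice only:

    import java.util.Collections;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
    import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
    import org.apache.lucene.codecs.lucene80.Lucene80Codec;
    import org.apache.lucene.index.IndexWriterConfig;

    public class CodecConfigExample {
      static IndexWriterConfig newConfig() {
        // Stored fields mode is the codec's only remaining constructor argument.
        return new IndexWriterConfig(new StandardAnalyzer())
            .setCodec(new Lucene80Codec(Mode.BEST_COMPRESSION))
            // FST load behavior is now requested per reader, not per codec.
            .setReaderAttributes(Collections.singletonMap(
                BlockTreeTermsReader.FST_MODE_KEY,
                BlockTreeTermsReader.FSTLoadMode.AUTO.name()));
      }
    }
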
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index f48ae6e..b3033d2 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -222,7 +222,7 @@ public class TestCodecs extends LuceneTestCase {
     final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, SEGMENT, 10000, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
     
     this.write(si, fieldInfos, dir, fields);
-    final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));
+    final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random()), Collections.emptyMap()));
 
     final Iterator<String> fieldsEnum = reader.iterator();
     String fieldName = fieldsEnum.next();
@@ -282,7 +282,7 @@ public class TestCodecs extends LuceneTestCase {
     if (VERBOSE) {
       System.out.println("TEST: now read postings");
     }
-    final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));
+    final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random()), Collections.emptyMap()));
 
     final Verify[] threads = new Verify[NUM_TEST_THREADS-1];
     for(int i=0;i<NUM_TEST_THREADS-1;i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
index 87dbdef..848a308 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
@@ -416,7 +416,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
 
             SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
             assert infos.size() == 1;
-            final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, false, IOContext.DEFAULT);
+            final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, false, IOContext.DEFAULT, Collections.emptyMap());
 
             //checkParallelReader(leaf, parLeafReader, schemaGen);
 
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index c0787fa..e873f9f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -213,8 +213,8 @@ public class TestDoc extends LuceneTestCase {
   private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile)
     throws Exception {
     IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
-    SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, false, context);
-    SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, false, context);
+    SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, false, context, Collections.emptyMap());
+    SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, false, context, Collections.emptyMap());
 
     final Codec codec = Codec.getDefault();
     TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
@@ -244,7 +244,7 @@ public class TestDoc extends LuceneTestCase {
 
   private void printSegment(PrintWriter out, SegmentCommitInfo si)
     throws Exception {
-    SegmentReader reader = new SegmentReader(si, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(si, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
 
     for (int i = 0; i < reader.numDocs(); i++)
       out.println(reader.document(i));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index 8588cb2..1c4e2d4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 
 import java.io.IOException;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -63,7 +64,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
     //After adding the document, we should be able to read it back in
-    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
     assertTrue(reader != null);
     Document doc = reader.document(0);
     assertTrue(doc != null);
@@ -124,7 +125,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
 
     PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "repeated", new BytesRef("repeated"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -195,7 +196,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
 
     PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "f1", new BytesRef("a"));
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -237,7 +238,7 @@ public class TestDocumentWriter extends LuceneTestCase {
     writer.commit();
     SegmentCommitInfo info = writer.newestSegment();
     writer.close();
-    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
 
     PostingsEnum termPositions = reader.postings(new Term("preanalyzed", "term1"), PostingsEnum.ALL);
     assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
index f8dbf81..4d979d0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
@@ -332,7 +333,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
               SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
               si.setCodec(codec);
               SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, -1, -1, -1);
-              SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, false, IOContext.DEFAULT);
+              SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, false, IOContext.DEFAULT, Collections.emptyMap());
               try {
                 thread0Count += sr.docFreq(new Term("field", "threadID0"));
                 thread1Count += sr.docFreq(new Term("field", "threadID1"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java b/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
index 7a85883..1882ca5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
@@ -45,7 +45,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
 
-    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null);
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null, Collections.emptyMap());
     SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
     ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
     assertSame(readersAndUpdates, pool.get(commitInfo, false));
@@ -64,7 +64,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
 
-    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null);
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null, Collections.emptyMap());
     SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
     assertFalse(pool.isReaderPoolingEnabled());
     pool.release(pool.get(commitInfo, true), random().nextBoolean());
@@ -100,7 +100,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
     ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
-        new NullInfoStream(), null, null);
+        new NullInfoStream(), null, null, Collections.emptyMap());
     int id = random().nextInt(10);
     if (random().nextBoolean()) {
       pool.enableReaderPooling();
@@ -168,7 +168,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
     ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
-        new NullInfoStream(), null, null);
+        new NullInfoStream(), null, null, Collections.emptyMap());
     int id = random().nextInt(10);
     if (random().nextBoolean()) {
       pool.enableReaderPooling();
@@ -213,7 +213,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
     ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L,
-        new NullInfoStream(), null, null);
+        new NullInfoStream(), null, null, Collections.emptyMap());
     if (random().nextBoolean()) {
       pool.enableReaderPooling();
     }
@@ -287,7 +287,7 @@ public class TestReaderPool extends LuceneTestCase {
     StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
     SegmentInfos segmentInfos = reader.segmentInfos.clone();
     ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
-        new NullInfoStream(), null, null);
+        new NullInfoStream(), null, null, Collections.emptyMap());
     assertEquals(0, pool.getReadersByRam().size());
 
     int ord = 0;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index 0ac82ba..d2cd711 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -60,8 +60,8 @@ public class TestSegmentMerger extends LuceneTestCase {
     SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
     DocHelper.setupDoc(doc2);
     SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
-    reader1 = new SegmentReader(info1, Version.LATEST.major, false, newIOContext(random()));
-    reader2 = new SegmentReader(info2, Version.LATEST.major, false, newIOContext(random()));
+    reader1 = new SegmentReader(info1, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
+    reader2 = new SegmentReader(info2, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
   }
 
   @Override
@@ -98,7 +98,7 @@ public class TestSegmentMerger extends LuceneTestCase {
                                                          mergeState.segmentInfo,
                                                          0, 0, -1L, -1L, -1L),
                                                    Version.LATEST.major,
-        false, newIOContext(random()));
+        false, newIOContext(random()), Collections.emptyMap());
     assertTrue(mergedReader != null);
     assertTrue(mergedReader.numDocs() == 2);
     Document newDoc1 = mergedReader.document(0);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 2f45488..76bf1a6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 
@@ -43,7 +44,7 @@ public class TestSegmentReader extends LuceneTestCase {
     dir = newDirectory();
     DocHelper.setupDoc(testDoc);
     SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
-    reader = new SegmentReader(info, Version.LATEST.major, false, IOContext.READ);
+    reader = new SegmentReader(info, Version.LATEST.major, false, IOContext.READ, Collections.emptyMap());
   }
   
   @Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 99853f7..9fa39c0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 
 import java.io.IOException;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -54,7 +55,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
 
   public void testTermDocs() throws IOException {
     //After adding the document, we should be able to read it back in
-    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
     assertTrue(reader != null);
 
     TermsEnum terms = reader.terms(DocHelper.TEXT_FIELD_2_KEY).iterator();
@@ -72,7 +73,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
   public void testBadSeek() throws IOException {
     {
       //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+      SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
       assertTrue(reader != null);
       PostingsEnum termDocs = TestUtil.docs(random(), reader,
           "textField2",
@@ -85,7 +86,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
     }
     {
       //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
+      SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
       assertTrue(reader != null);
       PostingsEnum termDocs = TestUtil.docs(random(), reader,
           "junk",
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
index d18ee10..3bb8e91 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/SegmentInfosSearcherManager.java
@@ -19,6 +19,7 @@ package org.apache.lucene.replicator.nrt;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -55,7 +56,7 @@ class SegmentInfosSearcherManager extends ReferenceManager<IndexSearcher> {
     this.searcherFactory = searcherFactory;
     currentInfos = infosIn;
     node.message("SegmentInfosSearcherManager.init: use incoming infos=" + infosIn.toString());
-    current = SearcherManager.getSearcher(searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null), null);
+    current = SearcherManager.getSearcher(searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null, Collections.emptyMap()), null);
     addReaderClosedListener(current.getIndexReader());
   }
 
@@ -104,7 +105,7 @@ class SegmentInfosSearcherManager extends ReferenceManager<IndexSearcher> {
     }
 
     // Open a new reader, sharing any common segment readers with the old one:
-    DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs);
+    DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs, Collections.emptyMap());
     addReaderClosedListener(r);
     node.message("refreshed to version=" + currentInfos.getVersion() + " r=" + r);
     return SearcherManager.getSearcher(searcherFactory, r, old.getIndexReader());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java
index f34aa82..325d312 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cheapbastard/CheapBastardCodec.java
@@ -18,6 +18,7 @@ package org.apache.lucene.codecs.cheapbastard;
 
 import org.apache.lucene.codecs.FilterCodec;
 import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.util.TestUtil;
 
 /** Codec that tries to use as little ram as possible because he spent all his money on beer */
@@ -25,8 +26,7 @@ import org.apache.lucene.util.TestUtil;
 // but if we named it "LowMemory" in codecs/ package, it would be irresistible like optimize()!
 public class CheapBastardCodec extends FilterCodec {
   
-  // TODO: would be better to have no terms index at all and bsearch a terms dict
-  private final PostingsFormat postings = TestUtil.getDefaultPostingsFormat(100, 200);
+  private final PostingsFormat postings = TestUtil.getDefaultPostingsFormat(100, 200, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP);
 
   public CheapBastardCodec() {
     super("CheapBastard", TestUtil.getDefaultCodec());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
index 5bf9ed4..143ba9c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
@@ -19,6 +19,7 @@ package org.apache.lucene.codecs.mockrandom;
 import java.io.IOException;
 import java.util.Random;
 
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
@@ -38,7 +39,6 @@ import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
 import org.apache.lucene.codecs.blocktreeords.OrdsBlockTreeTermsReader;
 import org.apache.lucene.codecs.blocktreeords.OrdsBlockTreeTermsWriter;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsReader;
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsWriter;
 import org.apache.lucene.codecs.memory.FSTOrdTermsReader;
@@ -316,7 +316,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat {
 
       boolean success = false;
       try {
-        fields = new BlockTreeTermsReader(postingsReader, state, Lucene50PostingsFormat.FSTLoadMode.AUTO);
+        fields = new BlockTreeTermsReader(postingsReader, state, RandomPicks.randomFrom(random, BlockTreeTermsReader.FSTLoadMode.values()));
         success = true;
       } finally {
         if (!success) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index 5865f27..aea5bd5 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -355,7 +355,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
                                                          segmentInfo, fieldInfos,
                                                          null, new IOContext(new FlushInfo(1, 20)));
     
-    SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ);
+    SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ, Collections.emptyMap());
 
     // PostingsFormat
     NormsProducer fakeNorms = new NormsProducer() {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
index ec3c323..a5390ba 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.PointsFormat;
 import org.apache.lucene.codecs.PointsReader;
@@ -38,6 +39,7 @@ import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
 import org.apache.lucene.codecs.blockterms.LuceneFixedGap;
 import org.apache.lucene.codecs.blockterms.LuceneVarGapDocFreqInterval;
 import org.apache.lucene.codecs.blockterms.LuceneVarGapFixedInterval;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat;
 import org.apache.lucene.codecs.bloom.TestBloomFilteredLucenePostings;
 import org.apache.lucene.codecs.lucene60.Lucene60PointsReader;
@@ -187,7 +189,7 @@ public class RandomCodec extends AssertingCodec {
     bkdSplitRandomSeed = random.nextInt();
 
     add(avoidCodecs,
-        TestUtil.getDefaultPostingsFormat(minItemsPerBlock, maxItemsPerBlock),
+        TestUtil.getDefaultPostingsFormat(minItemsPerBlock, maxItemsPerBlock, RandomPicks.randomFrom(random, BlockTreeTermsReader.FSTLoadMode.values())),
         new FSTPostingsFormat(),
         new FSTOrdPostingsFormat(),
         new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock),
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
index 324dab9..410635b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
@@ -748,7 +748,7 @@ public class RandomPostingsTester {
 
     currentFieldInfos = newFieldInfos;
 
-    SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, false, IOContext.READ);
+    SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, false, IOContext.READ, Collections.emptyMap());
 
     return codec.postingsFormat().fieldsProducer(readState);
   }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
index 87d3776..3712374 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
@@ -42,7 +42,6 @@ import org.apache.lucene.codecs.asserting.AssertingDocValuesFormat;
 import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
 import org.apache.lucene.codecs.cheapbastard.CheapBastardCodec;
 import org.apache.lucene.codecs.compressing.CompressingCodec;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80Codec;
 import org.apache.lucene.codecs.mockrandom.MockRandomPostingsFormat;
@@ -190,8 +189,7 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule {
     } else if ("Compressing".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 6 && !shouldAvoidCodec("Compressing"))) {
       codec = CompressingCodec.randomInstance(random);
     } else if ("Lucene80".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 5 && !shouldAvoidCodec("Lucene80"))) {
-      codec = new Lucene80Codec(RandomPicks.randomFrom(random, Lucene50StoredFieldsFormat.Mode.values()),
-          RandomPicks.randomFrom(random, Lucene50PostingsFormat.FSTLoadMode.values()));
+      codec = new Lucene80Codec(RandomPicks.randomFrom(random, Lucene50StoredFieldsFormat.Mode.values()));
     } else if (!"random".equals(TEST_CODEC)) {
       codec = Codec.forName(TEST_CODEC);
     } else if ("random".equals(TEST_POSTINGSFORMAT)) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index e706c45..f278fff 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -51,6 +51,7 @@ import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.blockterms.LuceneFixedGap;
+import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
 import org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
@@ -911,8 +912,8 @@ public final class TestUtil {
   * Returns the actual default postings format (e.g. LuceneMNPostingsFormat) for this version of Lucene.
    * @lucene.internal this may disappear at any time
    */
-  public static PostingsFormat getDefaultPostingsFormat(int minItemsPerBlock, int maxItemsPerBlock) {
-    return new Lucene50PostingsFormat(minItemsPerBlock, maxItemsPerBlock, Lucene50PostingsFormat.FSTLoadMode.AUTO);
+  public static PostingsFormat getDefaultPostingsFormat(int minItemsPerBlock, int maxItemsPerBlock, BlockTreeTermsReader.FSTLoadMode fstLoadMode) {
+    return new Lucene50PostingsFormat(minItemsPerBlock, maxItemsPerBlock, fstLoadMode);
   }
   
   /** Returns a random postings format that supports term ordinals */
diff --git a/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java b/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
index 487d06e..5adc161 100644
--- a/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/SchemaCodecFactory.java
@@ -23,7 +23,6 @@ import java.util.Locale;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
 import org.apache.lucene.codecs.lucene80.Lucene80Codec;
 import org.apache.solr.common.SolrException.ErrorCode;
@@ -92,7 +91,7 @@ public class SchemaCodecFactory extends CodecFactory implements SolrCoreAware {
       compressionMode = SOLR_DEFAULT_COMPRESSION_MODE;
       log.debug("Using default compressionMode: " + compressionMode);
     }
-    codec = new Lucene80Codec(compressionMode, Lucene50PostingsFormat.FSTLoadMode.AUTO) {
+    codec = new Lucene80Codec(compressionMode) {
       @Override
       public PostingsFormat getPostingsFormatForField(String field) {
         final SchemaField schemaField = core.getLatestSchema().getFieldOrNull(field);