Posted to commits@lucene.apache.org by si...@apache.org on 2018/04/17 14:53:29 UTC

lucene-solr:branch_7x: LUCENE-8253: Don't create ReadersAndUpdates for foreign segments

Repository: lucene-solr
Updated Branches:
  refs/heads/branch_7x 174c11f2c -> 330fd18f2


LUCENE-8253: Don't create ReadersAndUpdates for foreign segments

IndexWriter#numDeletesToMerge was creating a ReadersAndUpdates
for every incoming SegmentCommitInfo even if that info wasn't private
to the IndexWriter. This is an illegal use of the API, but since it's
transitively public via MergePolicy#findMerges we have to be conservative
about registering ReadersAndUpdates. In IndexWriter#numDeletesToMerge we
can only use existing ones. This means that for soft deletes we need to
react earlier in order to produce accurate numbers.
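
For context, a minimal sketch of that conservative lookup, using hypothetical
stand-in types (DeleteCounter, ReaderPool, SegmentStats) instead of the real
ReadersAndUpdates, reader pool and SegmentCommitInfo; the actual change is in
the IndexWriter#numDeletesToMerge hunk further down:

import java.io.IOException;

final class NumDeletesToMergeSketch {

  interface DeleteCounter {                       // stand-in for ReadersAndUpdates
    int numDeletesToMerge() throws IOException;
  }

  interface ReaderPool {                          // stand-in for IndexWriter's reader pool
    DeleteCounter getIfPooled(SegmentStats info); // returns null when nothing is pooled
  }

  static final class SegmentStats {               // stand-in for SegmentCommitInfo
    final int maxDoc;
    final int hardDeleteCount;
    SegmentStats(int maxDoc, int hardDeleteCount) {
      this.maxDoc = maxDoc;
      this.hardDeleteCount = hardDeleteCount;
    }
  }

  static int numDeletesToMerge(ReaderPool pool, SegmentStats info) throws IOException {
    DeleteCounter pooled = pool.getIfPooled(info);
    // never create a new ReadersAndUpdates here: the info may belong to a
    // foreign writer, so only an already-pooled instance may be consulted
    int numDeletes = (pooled != null) ? pooled.numDeletesToMerge() : info.hardDeleteCount;
    assert numDeletes <= info.maxDoc : numDeletes + " > maxDoc: " + info.maxDoc;
    return numDeletes;
  }
}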

This change partially rolls back the changes in LUCENE-8253. Instead of
registering the readers once they are pulled via IndexWriter#numDeletesToMerge,
we now check on flush whether segments are fully deleted, which is very
unlikely and can be done lazily, i.e. the extra cost of opening a reader and
checking all soft deletes is only paid if soft deletes are used and present
in the flushed segment.
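
A minimal sketch of that flush-time decision, again with hypothetical stand-in
types (SoftDeletesFieldInfo, FlushedSegmentStats, FullyDeletedCheck) rather
than the actual Lucene internals; the corresponding code lands in
IndexWriter#publishFlushedSegment below:

import java.io.IOException;

final class FlushTimeFullyDeletedCheckSketch {

  static final class SoftDeletesFieldInfo {        // stand-in for the soft-deletes FieldInfo
    final long docValuesGen;                       // -1 means the doc values were written at flush time
    SoftDeletesFieldInfo(long docValuesGen) {
      this.docValuesGen = docValuesGen;
    }
  }

  static final class FlushedSegmentStats {         // stand-in for the freshly flushed SegmentCommitInfo
    final int maxDoc;
    final int hardDeleteCount;
    final SoftDeletesFieldInfo softDeletesField;   // null if the segment has no soft-deletes field
    FlushedSegmentStats(int maxDoc, int hardDeleteCount, SoftDeletesFieldInfo softDeletesField) {
      this.maxDoc = maxDoc;
      this.hardDeleteCount = hardDeleteCount;
      this.softDeletesField = softDeletesField;
    }
  }

  interface FullyDeletedCheck {                    // stand-in for ReadersAndUpdates#isFullyDeleted
    boolean isFullyDeleted() throws IOException;   // implementations open a reader lazily
  }

  static boolean shouldDropOnFlush(FlushedSegmentStats stats, FullyDeletedCheck check)
      throws IOException {
    boolean hasInitialSoftDeletes =
        stats.softDeletesField != null && stats.softDeletesField.docValuesGen == -1;
    boolean fullyHardDeleted = stats.hardDeleteCount == stats.maxDoc;
    if (hasInitialSoftDeletes || fullyHardDeleted) {
      // only now do we pay the price of opening a reader and counting soft deletes
      return check.isFullyDeleted();
    }
    return false; // common case: no soft deletes present and not fully hard-deleted
  }
}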

This has the side effect that flushed segments that are 100% hard-deleted are
also cleaned up right after they are flushed; previously, these segments stuck
around for a while until they got picked for a merge or received another
delete.
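
At the usage level the effect looks roughly like the following stand-alone
program, a trimmed variant of the hard-delete branch of the new
testDropFullySoftDeletedSegment test further down (RAMDirectory and the
no-arg IndexWriterConfig are merely convenient choices for the sketch):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DropFullyHardDeletedSegment {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new RAMDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
      for (int i = 0; i < 2; i++) {                  // first segment: docs 0 and 1
        Document d = new Document();
        d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        writer.addDocument(d);
      }
      writer.commit();
      Document d = new Document();                   // second segment: a single doc ...
      d.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(d);
      writer.deleteDocuments(new Term("id", "1"));   // ... that ends up fully hard-deleted
      writer.commit();
      // the fully deleted second segment is cleaned up right away instead of
      // lingering until a merge picks it up
      System.out.println("segments: " + SegmentInfos.readLatestCommit(dir).size()); // expected: 1
    }
  }
}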

This also closes LUCENE-8256.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/330fd18f
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/330fd18f
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/330fd18f

Branch: refs/heads/branch_7x
Commit: 330fd18f200dae0892b3aa0882668435730c4319
Parents: 174c11f
Author: Simon Willnauer <si...@apache.org>
Authored: Tue Apr 17 10:16:58 2018 +0200
Committer: Simon Willnauer <si...@apache.org>
Committed: Tue Apr 17 16:48:03 2018 +0200

----------------------------------------------------------------------
 .../lucene/index/BufferedUpdatesStream.java     |   2 +-
 .../lucene/index/DocumentsWriterFlushQueue.java |   5 +-
 .../apache/lucene/index/FilterMergePolicy.java  |   4 +-
 .../lucene/index/FrozenBufferedUpdates.java     |   1 -
 .../org/apache/lucene/index/IndexWriter.java    |  59 +++++++----
 .../org/apache/lucene/index/MergePolicy.java    |   2 +-
 .../org/apache/lucene/index/NoMergePolicy.java  |   4 +-
 .../org/apache/lucene/index/PendingDeletes.java |   2 +-
 .../apache/lucene/index/PendingSoftDeletes.java |  12 ++-
 .../apache/lucene/index/ReadersAndUpdates.java  |  37 +++----
 .../index/SoftDeletesRetentionMergePolicy.java  |   5 +-
 .../lucene/index/StandardDirectoryReader.java   |   2 +-
 .../apache/lucene/index/TestIndexWriter.java    |   3 +-
 .../lucene/index/TestIndexWriterOnDiskFull.java |   3 +-
 .../apache/lucene/index/TestMultiFields.java    |   3 +-
 .../apache/lucene/index/TestPendingDeletes.java |   4 +-
 .../TestSoftDeletesDirectoryReaderWrapper.java  |  32 ------
 .../TestSoftDeletesRetentionMergePolicy.java    | 101 ++++++++++++++++---
 .../admin/SegmentsInfoRequestHandlerTest.java   |   2 -
 19 files changed, 183 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 78fe950..32ee256 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -324,7 +324,7 @@ class BufferedUpdatesStream implements Accountable {
         totDelCount += segState.rld.getPendingDeleteCount() - segState.startDelCount;
         int fullDelCount = segState.rld.info.getDelCount() + segState.rld.getPendingDeleteCount();
         assert fullDelCount <= segState.rld.info.info.maxDoc() : fullDelCount + " > " + segState.rld.info.info.maxDoc();
-        if (segState.rld.isFullyDeleted() && writer.getConfig().mergePolicy.keepFullyDeletedSegment(segState.reader) == false) {
+        if (segState.rld.isFullyDeleted() && writer.getConfig().mergePolicy.keepFullyDeletedSegment(() -> segState.reader) == false) {
           if (allDeleted == null) {
             allDeleted = new ArrayList<>();
           }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
index b051545..fde7587 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
@@ -188,7 +188,8 @@ class DocumentsWriterFlushQueue {
     protected final void publishFlushedSegment(IndexWriter indexWriter, FlushedSegment newSegment, FrozenBufferedUpdates globalPacket)
         throws IOException {
       assert newSegment != null;
-      assert newSegment.segmentInfo != null;
+      SegmentCommitInfo segmentInfo = newSegment.segmentInfo;
+      assert segmentInfo != null;
       final FrozenBufferedUpdates segmentUpdates = newSegment.segmentUpdates;
       if (indexWriter.infoStream.isEnabled("DW")) {
         indexWriter.infoStream.message("DW", "publishFlushedSegment seg-private updates=" + segmentUpdates);  
@@ -198,7 +199,7 @@ class DocumentsWriterFlushQueue {
         indexWriter.infoStream.message("DW", "flush: push buffered seg private updates: " + segmentUpdates);
       }
       // now publish!
-      indexWriter.publishFlushedSegment(newSegment.segmentInfo, segmentUpdates, globalPacket, newSegment.sortMap);
+      indexWriter.publishFlushedSegment(segmentInfo, newSegment.fieldInfos, segmentUpdates, globalPacket, newSegment.sortMap);
     }
     
     protected final void finishFlush(IndexWriter indexWriter, FlushedSegment newSegment, FrozenBufferedUpdates bufferedUpdates)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/FilterMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/FilterMergePolicy.java
index d073b84..afe232a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterMergePolicy.java
@@ -94,8 +94,8 @@ public class FilterMergePolicy extends MergePolicy {
   }
 
   @Override
-  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
-    return in.keepFullyDeletedSegment(reader);
+  public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+    return in.keepFullyDeletedSegment(readerIOSupplier);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
index a90d46a..3a775bd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
@@ -339,7 +339,6 @@ class FrozenBufferedUpdates {
                                                messagePrefix + "done inner apply del packet (%s) to %d segments; %d new deletes/updates; took %.3f sec",
                                                this, segStates.length, delCount, (System.nanoTime() - iterStartNS) / 1000000000.));
       }
-      
       if (privateSegment != null) {
         // No need to retry for a segment-private packet: the merge that folds in our private segment already waits for all deletes to
         // be applied before it kicks off, so this private segment must already not be in the set of merging segments

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index d60b311..358cd40 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -2769,7 +2769,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
    * segments SegmentInfo to the index writer.
    */
   synchronized void publishFlushedSegment(SegmentCommitInfo newSegment,
-                                          FrozenBufferedUpdates packet, FrozenBufferedUpdates globalPacket,
+                                          FieldInfos fieldInfos, FrozenBufferedUpdates packet, FrozenBufferedUpdates globalPacket,
                                           Sorter.DocMap sortMap) throws IOException {
     boolean published = false;
     try {
@@ -2794,7 +2794,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
         // Do this as an event so it applies higher in the stack when we are not holding DocumentsWriterFlushQueue.purgeLock:
         docWriter.putEvent(new DocumentsWriter.ResolveUpdatesEvent(packet));
-          
+
       } else {
         // Since we don't have a delete packet to apply we can get a new
         // generation right away
@@ -2809,14 +2809,37 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       segmentInfos.add(newSegment);
       published = true;
       checkpoint();
-
       if (packet != null && packet.any() && sortMap != null) {
         // TODO: not great we do this heavyish op while holding IW's monitor lock,
         // but it only applies if you are using sorted indices and updating doc values:
         ReadersAndUpdates rld = readerPool.get(newSegment, true);
         rld.sortMap = sortMap;
+        // DON'T release this ReadersAndUpdates; we need to stick with that sortMap
+      }
+      FieldInfo fieldInfo = fieldInfos.fieldInfo(config.softDeletesField); // will return null if no soft deletes are present
+      // this is a corner case where documents delete themselves with soft deletes. This is used to
+      // build delete tombstones etc. In this case we haven't seen any updates to the DV in this freshly flushed segment.
+      // if we have seen updates, the update code checks whether the segment is fully deleted.
+      boolean hasInitialSoftDeleted = (fieldInfo != null
+          && fieldInfo.getDocValuesGen() == -1
+          && fieldInfo.getDocValuesType() != DocValuesType.NONE);
+      final boolean isFullyHardDeleted = newSegment.getDelCount() == newSegment.info.maxDoc();
+      // we either have a fully hard-deleted segment or one or more docs are soft-deleted. In both cases we need
+      // to go and check if they are fully deleted. This has the nice side-effect that we now have accurate numbers
+      // for the soft delete right after we flushed to disk.
+      if (hasInitialSoftDeleted || isFullyHardDeleted){
+        // this operation is only really executed if needed; if soft deletes are not configured it is only executed
+        // if we deleted all docs in this newly flushed segment.
+        ReadersAndUpdates rld = readerPool.get(newSegment, true);
+        try {
+          if (isFullyDeleted(rld)) {
+            dropDeletedSegment(newSegment);
+          }
+        } finally {
+          readerPool.release(rld);
+        }
       }
-      
+
     } finally {
       if (published == false) {
         adjustPendingNumDocs(-newSegment.info.maxDoc());
@@ -2824,6 +2847,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       flushCount.incrementAndGet();
       doAfterFlush();
     }
+
   }
 
   private synchronized void resetMergeExceptions() {
@@ -3357,7 +3381,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
             flushSuccess = true;
 
             applyAllDeletesAndUpdates();
-
             synchronized(this) {
 
               readerPool.commit(segmentInfos);
@@ -5213,12 +5236,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
   final boolean isFullyDeleted(ReadersAndUpdates readersAndUpdates) throws IOException {
     if (readersAndUpdates.isFullyDeleted()) {
-      SegmentReader reader = readersAndUpdates.getReader(IOContext.READ);
-      try {
-        return config.mergePolicy.keepFullyDeletedSegment(reader) == false;
-      } finally {
-        readersAndUpdates.release(reader);
-      }
+      assert Thread.holdsLock(this);
+      return readersAndUpdates.keepFullyDeletedSegment(config.getMergePolicy()) == false;
     }
     return false;
   }
@@ -5232,15 +5251,17 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
    */
   public final int numDeletesToMerge(SegmentCommitInfo info) throws IOException {
     MergePolicy mergePolicy = config.getMergePolicy();
-    final ReadersAndUpdates rld = readerPool.get(info, true);
-    try {
-      int numDeletesToMerge = rld.numDeletesToMerge(mergePolicy);
-      assert numDeletesToMerge <= info.info.maxDoc() :
-          "numDeletesToMerge: " + numDeletesToMerge + " > maxDoc: " + info.info.maxDoc();
-      return numDeletesToMerge;
-    } finally {
-      readerPool.release(rld);
+    final ReadersAndUpdates rld = readerPool.get(info, false);
+    int numDeletesToMerge;
+    if (rld != null) {
+      numDeletesToMerge = rld.numDeletesToMerge(mergePolicy);
+    } else {
+      // if we don't have a pooled instance let's just return the hard deletes, this is safe!
+      numDeletesToMerge = info.getDelCount();
     }
+    assert numDeletesToMerge <= info.info.maxDoc() :
+        "numDeletesToMerge: " + numDeletesToMerge + " > maxDoc: " + info.info.maxDoc();
+    return numDeletesToMerge;
 
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
index 093fe5a..029cca9 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
@@ -611,7 +611,7 @@ public abstract class MergePolicy {
    * Returns true if the segment represented by the given CodecReader should be keep even if it's fully deleted.
    * This is useful for testing of for instance if the merge policy implements retention policies for soft deletes.
    */
-  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+  public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
     return false;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
index e1f1a54..86a173c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
@@ -76,8 +76,8 @@ public final class NoMergePolicy extends MergePolicy {
   }
 
   @Override
-  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
-    return super.keepFullyDeletedSegment(reader);
+  public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+    return super.keepFullyDeletedSegment(readerIOSupplier);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java b/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
index c0aed38..ae91be8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PendingDeletes.java
@@ -230,7 +230,7 @@ class PendingDeletes {
   /**
    * Returns <code>true</code> iff the segment represented by this {@link PendingDeletes} is fully deleted
    */
-  boolean isFullyDeleted() {
+  boolean isFullyDeleted(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
     return info.getDelCount() + numPendingDeletes() == info.info.maxDoc();
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
index 3dca782..68a2eac 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PendingSoftDeletes.java
@@ -168,6 +168,11 @@ final class PendingSoftDeletes extends PendingDeletes {
 
   @Override
   int numDeletesToMerge(MergePolicy policy, IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+    ensureInitialized(readerIOSupplier); // initialize to ensure we have accurate counts
+    return super.numDeletesToMerge(policy, readerIOSupplier);
+  }
+
+  private void ensureInitialized(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
     if (dvGeneration == -2) {
       FieldInfos fieldInfos = readFieldInfos();
       FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@@ -183,7 +188,12 @@ final class PendingSoftDeletes extends PendingDeletes {
         dvGeneration = fieldInfo == null ? -1 : fieldInfo.getDocValuesGen();
       }
     }
-    return super.numDeletesToMerge(policy, readerIOSupplier);
+  }
+
+  @Override
+  boolean isFullyDeleted(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+    ensureInitialized(readerIOSupplier); // initialize to ensure we have accurate counts - only needed in the soft-delete case
+    return super.isFullyDeleted(readerIOSupplier);
   }
 
   private FieldInfos readFieldInfos() throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 2543721..76a28e2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -41,7 +41,6 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOSupplier;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 
@@ -260,19 +259,20 @@ final class ReadersAndUpdates {
   }
 
   synchronized int numDeletesToMerge(MergePolicy policy) throws IOException {
-    IOSupplier<CodecReader> readerSupplier = () -> {
-      if (this.reader == null) {
-        // get a reader and dec the ref right away we just make sure we have a reader
-        getReader(IOContext.READ).decRef();
-      }
-      if (reader.getLiveDocs() != pendingDeletes.getLiveDocs()
-          || reader.numDeletedDocs() != info.getDelCount() - pendingDeletes.numPendingDeletes()) {
-        // we have a reader but its live-docs are out of sync. let's create a temporary one that we never share
-        swapNewReaderWithLatestLiveDocs();
-      }
-      return reader;
-    };
-    return pendingDeletes.numDeletesToMerge(policy, readerSupplier);
+    return pendingDeletes.numDeletesToMerge(policy, this::getLatestReader);
+  }
+
+  private CodecReader getLatestReader() throws IOException {
+    if (this.reader == null) {
+      // get a reader and dec the ref right away we just make sure we have a reader
+      getReader(IOContext.READ).decRef();
+    }
+    if (reader.getLiveDocs() != pendingDeletes.getLiveDocs()
+        || reader.numDeletedDocs() != info.getDelCount() - pendingDeletes.numPendingDeletes()) {
+      // we have a reader but its live-docs are out of sync. let's create a temporary one that we never share
+      swapNewReaderWithLatestLiveDocs();
+    }
+    return reader;
   }
 
   public synchronized Bits getLiveDocs() {
@@ -813,8 +813,8 @@ final class ReadersAndUpdates {
     return sb.toString();
   }
 
-  public synchronized boolean isFullyDeleted() {
-    return pendingDeletes.isFullyDeleted();
+  public synchronized boolean isFullyDeleted() throws IOException {
+    return pendingDeletes.isFullyDeleted(this::getLatestReader);
   }
 
   private final void markAsShared() {
@@ -822,5 +822,8 @@ final class ReadersAndUpdates {
     liveDocsSharedPending = false;
     pendingDeletes.liveDocsShared(); // this is not costly we can just call it even if it's already marked as shared
   }
-  
+
+  boolean keepFullyDeletedSegment(MergePolicy mergePolicy) throws IOException {
+    return mergePolicy.keepFullyDeletedSegment(this::getLatestReader);
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
index 25df3c6..68f51bb 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SoftDeletesRetentionMergePolicy.java
@@ -70,7 +70,8 @@ public final class SoftDeletesRetentionMergePolicy extends OneMergeWrappingMerge
   }
 
   @Override
-  public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+  public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+    CodecReader reader = readerIOSupplier.get();
     /* we only need a single hit to keep it no need for soft deletes to be checked*/
     Scorer scorer = getScorer(retentionQuerySupplier.get(), wrapLiveDocs(reader, null, reader.maxDoc()));
     if (scorer != null) {
@@ -78,7 +79,7 @@ public final class SoftDeletesRetentionMergePolicy extends OneMergeWrappingMerge
       boolean atLeastOneHit = iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
       return atLeastOneHit;
     }
-    return super.keepFullyDeletedSegment(reader) ;
+    return super.keepFullyDeletedSegment(readerIOSupplier) ;
   }
 
   // pkg private for testing

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 488ccaf..63c6d95 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -103,7 +103,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
         final ReadersAndUpdates rld = writer.readerPool.get(info, true);
         try {
           final SegmentReader reader = rld.getReadOnlyClone(IOContext.READ);
-          if (reader.numDocs() > 0 || writer.getConfig().mergePolicy.keepFullyDeletedSegment(reader)) {
+          if (reader.numDocs() > 0 || writer.getConfig().mergePolicy.keepFullyDeletedSegment(() -> reader)) {
             // Steal the ref:
             readers.add(reader);
             infosUpto++;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 039c2ad..df80928 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -90,6 +90,7 @@ import org.apache.lucene.store.SimpleFSLockFactory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.IOSupplier;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
@@ -2247,7 +2248,7 @@ public class TestIndexWriter extends LuceneTestCase {
     AtomicBoolean keepFullyDeletedSegments = new AtomicBoolean();
     iwc.setMergePolicy(new FilterMergePolicy(iwc.getMergePolicy()) {
       @Override
-      public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+      public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
         return keepFullyDeletedSegments.get();
       }
     });

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index ce3c72c..d225f43 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -35,6 +35,7 @@ import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.IOSupplier;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 
@@ -503,7 +504,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
           .setReaderPooling(true)
           .setMergePolicy(new FilterMergePolicy(newLogMergePolicy(2)) {
             @Override
-            public boolean keepFullyDeletedSegment(CodecReader reader) throws IOException {
+            public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
               // we can do this because we add/delete/add (and dont merge to "nothing")
               return true;
             }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
index 3c09bbd..439ec51 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
@@ -32,6 +32,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOSupplier;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.UnicodeUtil;
@@ -51,7 +52,7 @@ public class TestMultiFields extends LuceneTestCase {
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                              .setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) {
                                                @Override
-                                               public boolean keepFullyDeletedSegment(CodecReader reader) {
+                                               public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
                                                  // we can do this because we use NoMergePolicy (and dont merge to "nothing")
                                                  return true;
                                                }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
index bbe309a..7c6891e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPendingDeletes.java
@@ -134,13 +134,15 @@ public class TestPendingDeletes extends LuceneTestCase {
     SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 3, false, Codec.getDefault(),
         Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
     SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, -1, -1, -1);
+    FieldInfos fieldInfos = new FieldInfos(new FieldInfo[0]);
+    si.getCodec().fieldInfosFormat().write(dir, si, "", fieldInfos, IOContext.DEFAULT);
     PendingDeletes deletes = newPendingDeletes(commitInfo);
     for (int i = 0; i < 3; i++) {
       assertTrue(deletes.delete(i));
       if (random().nextBoolean()) {
         assertTrue(deletes.writeLiveDocs(dir));
       }
-      assertEquals(i == 2, deletes.isFullyDeleted());
+      assertEquals(i == 2, deletes.isFullyDeleted(() -> null));
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
index 30a11b6..dea7bc9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
@@ -27,7 +27,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
@@ -197,35 +196,4 @@ public class TestSoftDeletesDirectoryReaderWrapper extends LuceneTestCase {
     assertEquals(1, leafCalled.get());
     IOUtils.close(reader, writer, dir);
   }
-
-  public void testForceMergeDeletes() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField("soft_delete");
-    config.setMergePolicy(newMergePolicy(random(), false)); // no mock MP it might not select segments for force merge
-    if (random().nextBoolean()) {
-      config.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
-          () -> new MatchNoDocsQuery(), config.getMergePolicy()));
-    }
-    IndexWriter writer = new IndexWriter(dir, config);
-    // The first segment includes d1 and d2
-    for (int i = 0; i < 2; i++) {
-      Document d = new Document();
-      d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
-      writer.addDocument(d);
-    }
-    writer.flush();
-    // The second segment includes only the tombstone
-    Document tombstone = new Document();
-    tombstone.add(new NumericDocValuesField("soft_delete", 1));
-    writer.softUpdateDocument(new Term("id", "1"), tombstone, new NumericDocValuesField("soft_delete", 1));
-    // Internally, forceMergeDeletes will call flush to flush pending updates
-    // Thus, we will have two segments - both having soft-deleted documents.
-    // We expect any MP to merge these segments into one segment
-    // when calling forceMergeDeletes.
-    writer.forceMergeDeletes(true);
-    assertEquals(1, writer.maxDoc());
-    assertEquals(1, writer.segmentInfos.asList().size());
-    writer.close();
-    dir.close();
-  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
index 061d006..b868a2e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesRetentionMergePolicy.java
@@ -71,12 +71,12 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
     {
       assertEquals(2, reader.leaves().size());
       final SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
-      assertTrue(policy.keepFullyDeletedSegment(segmentReader));
+      assertTrue(policy.keepFullyDeletedSegment(() -> segmentReader));
       assertEquals(0, policy.numDeletesToMerge(segmentReader.getSegmentInfo(), 0, () -> segmentReader));
     }
     {
       SegmentReader segmentReader = (SegmentReader) reader.leaves().get(1).reader();
-      assertTrue(policy.keepFullyDeletedSegment(segmentReader));
+      assertTrue(policy.keepFullyDeletedSegment(() -> segmentReader));
       assertEquals(0, policy.numDeletesToMerge(segmentReader.getSegmentInfo(), 0, () -> segmentReader));
       writer.forceMerge(1);
       reader.close();
@@ -86,7 +86,7 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
       assertEquals(1, reader.leaves().size());
       SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
       assertEquals(2, reader.maxDoc());
-      assertTrue(policy.keepFullyDeletedSegment(segmentReader));
+      assertTrue(policy.keepFullyDeletedSegment(() -> segmentReader));
       assertEquals(0, policy.numDeletesToMerge(segmentReader.getSegmentInfo(), 0, () -> segmentReader));
     }
     writer.forceMerge(1); // make sure we don't merge this
@@ -114,10 +114,9 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
     writer.addDocument(doc);
     DirectoryReader reader = writer.getReader();
     assertEquals(1, reader.leaves().size());
-    SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
     MergePolicy policy = new SoftDeletesRetentionMergePolicy("soft_delete",
         () -> new DocValuesFieldExistsQuery("keep_around"), NoMergePolicy.INSTANCE);
-    assertFalse(policy.keepFullyDeletedSegment(segmentReader));
+    assertFalse(policy.keepFullyDeletedSegment(() -> (SegmentReader) reader.leaves().get(0).reader()));
     reader.close();
 
     doc = new Document();
@@ -126,15 +125,13 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
     doc.add(new NumericDocValuesField("soft_delete", 1));
     writer.addDocument(doc);
 
-    reader = writer.getReader();
-    assertEquals(2, reader.leaves().size());
-    segmentReader = (SegmentReader) reader.leaves().get(0).reader();
-    assertFalse(policy.keepFullyDeletedSegment(segmentReader));
+    DirectoryReader reader1 = writer.getReader();
+    assertEquals(2, reader1.leaves().size());
+    assertFalse(policy.keepFullyDeletedSegment(() -> (SegmentReader) reader1.leaves().get(0).reader()));
 
-    segmentReader = (SegmentReader) reader.leaves().get(1).reader();
-    assertTrue(policy.keepFullyDeletedSegment(segmentReader));
+    assertTrue(policy.keepFullyDeletedSegment(() -> (SegmentReader) reader1.leaves().get(1).reader()));
 
-    IOUtils.close(reader, writer, dir);
+    IOUtils.close(reader1, writer, dir);
   }
 
   public void testFieldBasedRetention() throws IOException {
@@ -365,4 +362,84 @@ public class TestSoftDeletesRetentionMergePolicy extends LuceneTestCase {
     IOUtils.close(reader, writer, dir);
   }
 
+  public void testForceMergeDeletes() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField("soft_delete");
+    config.setMergePolicy(newMergePolicy(random(), false)); // no mock MP it might not select segments for force merge
+    if (random().nextBoolean()) {
+      config.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete",
+          () -> new MatchNoDocsQuery(), config.getMergePolicy()));
+    }
+    IndexWriter writer = new IndexWriter(dir, config);
+    // The first segment includes d1 and d2
+    for (int i = 0; i < 2; i++) {
+      Document d = new Document();
+      d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
+      writer.addDocument(d);
+    }
+    writer.flush();
+    // The second segment includes only the tombstone
+    Document tombstone = new Document();
+    tombstone.add(new NumericDocValuesField("soft_delete", 1));
+    writer.softUpdateDocument(new Term("id", "1"), tombstone, new NumericDocValuesField("soft_delete", 1));
+    // Internally, forceMergeDeletes will call flush to flush pending updates
+    // Thus, we will have two segments - both having soft-deleted documents.
+    // We expect any MP to merge these segments into one segment
+    // when calling forceMergeDeletes.
+    writer.forceMergeDeletes(true);
+    assertEquals(1, writer.maxDoc());
+    assertEquals(1, writer.segmentInfos.asList().size());
+    writer.close();
+    dir.close();
+  }
+
+  public void testDropFullySoftDeletedSegment() throws Exception {
+    Directory dir = newDirectory();
+    String softDelete = random().nextBoolean() ? null : "soft_delete";
+    IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(softDelete);
+    config.setMergePolicy(newMergePolicy(random(), true));
+    if (softDelete != null && random().nextBoolean()) {
+      config.setMergePolicy(new SoftDeletesRetentionMergePolicy(softDelete,
+          () -> new MatchNoDocsQuery(), config.getMergePolicy()));
+    }
+    IndexWriter writer = new IndexWriter(dir, config);
+    for (int i = 0; i < 2; i++) {
+      Document d = new Document();
+      d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
+      writer.addDocument(d);
+    }
+    writer.flush();
+    assertEquals(1, writer.segmentInfos.asList().size());
+
+    if (softDelete != null) {
+      // the newly created segment should be dropped as it is fully deleted (i.e. only contains deleted docs).
+      if (random().nextBoolean()) {
+        Document tombstone = new Document();
+        tombstone.add(new NumericDocValuesField(softDelete, 1));
+        writer.softUpdateDocument(new Term("id", "1"), tombstone, new NumericDocValuesField(softDelete, 1));
+      } else {
+        Document doc = new Document();
+        doc.add(new StringField("id", Integer.toString(1), Field.Store.YES));
+        if (random().nextBoolean()) {
+          writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField(softDelete, 1));
+        } else {
+          writer.addDocument(doc);
+        }
+        writer.updateDocValues(new Term("id", "1"), new NumericDocValuesField(softDelete, 1));
+      }
+    } else {
+      Document d = new Document();
+      d.add(new StringField("id", "1", Field.Store.YES));
+      writer.addDocument(d);
+      writer.deleteDocuments(new Term("id", "1"));
+    }
+    writer.commit();
+    IndexReader reader = writer.getReader();
+    assertEquals(reader.numDocs(), 1);
+    reader.close();
+    assertEquals(1, writer.segmentInfos.asList().size());
+
+    writer.close();
+    dir.close();
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/330fd18f/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
index c501f5f..3173c12 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/SegmentsInfoRequestHandlerTest.java
@@ -16,7 +16,6 @@
  */
 package org.apache.solr.handler.admin;
 
-import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Version;
 import org.apache.solr.index.LogDocMergePolicyFactory;
 import org.apache.solr.SolrTestCaseJ4;
@@ -27,7 +26,6 @@ import org.junit.Test;
 /**
  * Tests for SegmentsInfoRequestHandler. Plugin entry, returning data of created segment.
  */
-@LuceneTestCase.AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8253")
 public class SegmentsInfoRequestHandlerTest extends SolrTestCaseJ4 {
   private static final int DOC_COUNT = 5;