Posted to commits@lucene.apache.org by si...@apache.org on 2018/04/23 08:29:33 UTC

lucene-solr:master: LUCENE-8260: Extract ReaderPool from IndexWriter

Repository: lucene-solr
Updated Branches:
  refs/heads/master 4136fe0e6 -> 897569295


LUCENE-8260: Extract ReaderPool from IndexWriter

ReaderPool plays a central role in IndexWriter: it pools NRT readers
and makes sure buffered deletes and updates get written to disk. This class
used to be a non-static inner class that reached into many aspects of
IndexWriter itself, including its locks. This change moves the class out of IW and
clearly defines its responsibility with respect to locking etc. IndexWriter
no longer needs to share its ReaderPool and instead reacts to writes done
inside the pool by checkpointing internally. This also removes the acquisition of the IW
lock inside the reader pool, which made reasoning about concurrency difficult.

This change also adds javadocs and dedicated tests for the ReaderPool class.
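
Roughly, the new interaction looks like the following sketch (condensed from
the IndexWriter hunks further down; not the literal code, and error handling
is omitted):

    // Inside IndexWriter, under its own monitor lock: the pool only reports
    // whether it wrote any files, and IndexWriter decides how to checkpoint.
    if (readerPool.writeAllDocValuesUpdates()) {
      checkpoint();
    }
    if (writeAllDeletes) {
      if (readerPool.commit(segmentInfos)) {
        checkpointNoSIS(); // state moved to disk; don't bump SIS.version
      }
    }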


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/89756929
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/89756929
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/89756929

Branch: refs/heads/master
Commit: 8975692953713923bd1cc67766cf92565183c2b8
Parents: 4136fe0
Author: Simon Willnauer <si...@apache.org>
Authored: Mon Apr 23 10:29:10 2018 +0200
Committer: GitHub <no...@github.com>
Committed: Mon Apr 23 10:29:10 2018 +0200

----------------------------------------------------------------------
 .../lucene/index/BufferedUpdatesStream.java     |  45 +-
 .../apache/lucene/index/DocumentsWriter.java    |  43 +-
 .../lucene/index/FrozenBufferedUpdates.java     |   8 +-
 .../org/apache/lucene/index/IndexWriter.java    | 489 ++++---------------
 .../org/apache/lucene/index/ReaderPool.java     | 390 +++++++++++++++
 .../apache/lucene/index/ReadersAndUpdates.java  |   6 +-
 .../lucene/index/StandardDirectoryReader.java   |   4 +-
 .../lucene/index/TestIndexWriterDelete.java     |  24 +
 .../org/apache/lucene/index/TestReaderPool.java | 223 +++++++++
 9 files changed, 783 insertions(+), 449 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 32ee256..7a93cfd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -258,7 +258,7 @@ class BufferedUpdatesStream implements Accountable {
   }
 
   /** Holds all per-segment internal state used while resolving deletions. */
-  public static final class SegmentState {
+  static final class SegmentState {
     final long delGen;
     final ReadersAndUpdates rld;
     final SegmentReader reader;
@@ -268,21 +268,13 @@ class BufferedUpdatesStream implements Accountable {
     PostingsEnum postingsEnum;
     BytesRef term;
 
-    public SegmentState(IndexWriter.ReaderPool pool, SegmentCommitInfo info) throws IOException {
-      rld = pool.get(info, true);
+    SegmentState(ReadersAndUpdates rld, SegmentCommitInfo info) throws IOException {
+      this.rld = rld;
       startDelCount = rld.getPendingDeleteCount();
       reader = rld.getReader(IOContext.READ);
       delGen = info.getBufferedDeletesGen();
     }
 
-    public void finish(IndexWriter.ReaderPool pool) throws IOException {
-      try {
-        rld.release(reader);
-      } finally {
-        pool.release(rld);
-      }
-    }
-
     @Override
     public String toString() {
       return "SegmentState(" + rld.info + ")";
@@ -290,23 +282,21 @@ class BufferedUpdatesStream implements Accountable {
   }
 
   /** Opens SegmentReader and inits SegmentState for each segment. */
-  public SegmentState[] openSegmentStates(IndexWriter.ReaderPool pool, List<SegmentCommitInfo> infos,
+  public SegmentState[] openSegmentStates(List<SegmentCommitInfo> infos,
                                           Set<SegmentCommitInfo> alreadySeenSegments, long delGen) throws IOException {
     List<SegmentState> segStates = new ArrayList<>();
     try {
       for (SegmentCommitInfo info : infos) {
         if (info.getBufferedDeletesGen() <= delGen && alreadySeenSegments.contains(info) == false) {
-          segStates.add(new SegmentState(pool, info));
+          segStates.add(new SegmentState(writer.getPooledInstance(info, true), info));
           alreadySeenSegments.add(info);
         }
       }
     } catch (Throwable t) {
-      for(SegmentState segState : segStates) {
-        try {
-          segState.finish(pool);
-        } catch (Throwable th) {
-          t.addSuppressed(th);
-        }
+      try {
+        finishSegmentStates(segStates);
+      } catch (Throwable t1) {
+        t.addSuppressed(t1);
       }
       throw t;
     }
@@ -314,8 +304,19 @@ class BufferedUpdatesStream implements Accountable {
     return segStates.toArray(new SegmentState[0]);
   }
 
+  private void finishSegmentStates(List<SegmentState> segStates) throws IOException {
+    IOUtils.applyToAll(segStates, s -> {
+      ReadersAndUpdates rld = s.rld;
+      try {
+        rld.release(s.reader);
+      } finally {
+        writer.release(s.rld);
+      }
+    });
+  }
+
   /** Close segment states previously opened with openSegmentStates. */
-  public ApplyDeletesResult closeSegmentStates(IndexWriter.ReaderPool pool, SegmentState[] segStates, boolean success) throws IOException {
+  public ApplyDeletesResult closeSegmentStates(SegmentState[] segStates, boolean success) throws IOException {
     List<SegmentCommitInfo> allDeleted = null;
     long totDelCount = 0;
     final List<SegmentState> segmentStates = Arrays.asList(segStates);
@@ -332,9 +333,9 @@ class BufferedUpdatesStream implements Accountable {
         }
       }
     }
-    IOUtils.applyToAll(segmentStates, s -> s.finish(pool));
+    finishSegmentStates(segmentStates);
     if (infoStream.isEnabled("BD")) {
-      infoStream.message("BD", "closeSegmentStates: " + totDelCount + " new deleted documents; pool " + updates.size() + " packets; bytesUsed=" + pool.ramBytesUsed());
+      infoStream.message("BD", "closeSegmentStates: " + totDelCount + " new deleted documents; pool " + updates.size() + " packets; bytesUsed=" + writer.getReaderPoolRamBytesUsed());
     }
 
     return new ApplyDeletesResult(totDelCount > 0, allDeleted);      
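
The caller-side effect of these BufferedUpdatesStream changes is sketched
below (condensed from the FrozenBufferedUpdates hunks further down; variable
names and control flow are simplified):

    // SegmentState no longer takes the pool; IndexWriter hands out pooled
    // ReadersAndUpdates instances and releases them when states are finished.
    BufferedUpdatesStream.SegmentState[] segStates =
        writer.bufferedUpdatesStream.openSegmentStates(infos, seenSegments, delGen);
    boolean success = false;
    try {
      // ... resolve deletes / doc values updates against segStates ...
      success = true;
    } finally {
      writer.bufferedUpdatesStream.closeSegmentStates(segStates, success);
    }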

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index f848b2a..0042dab 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -27,6 +27,7 @@ import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.ToLongFunction;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.DocumentsWriterFlushQueue.SegmentFlushTicket;
@@ -140,40 +141,26 @@ final class DocumentsWriter implements Closeable, Accountable {
     flushControl = new DocumentsWriterFlushControl(this, config, writer.bufferedUpdatesStream);
   }
   
-  synchronized long deleteQueries(final Query... queries) throws IOException {
-    // TODO why is this synchronized?
-    final DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-    long seqNo = deleteQueue.addDelete(queries);
-    flushControl.doOnDelete();
-    lastSeqNo = Math.max(lastSeqNo, seqNo);
-    if (applyAllDeletes(deleteQueue)) {
-      seqNo = -seqNo;
-    }
-    return seqNo;
+  long deleteQueries(final Query... queries) throws IOException {
+    return applyDeleteOrUpdate(q -> q.addDelete(queries));
   }
 
-  synchronized void setLastSeqNo(long seqNo) {
+  void setLastSeqNo(long seqNo) {
     lastSeqNo = seqNo;
   }
 
-  // TODO: we could check w/ FreqProxTermsWriter: if the
-  // term doesn't exist, don't bother buffering into the
-  // per-DWPT map (but still must go into the global map)
-  synchronized long deleteTerms(final Term... terms) throws IOException {
-    // TODO why is this synchronized?
-    final DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-    long seqNo = deleteQueue.addDelete(terms);
-    flushControl.doOnDelete();
-    lastSeqNo = Math.max(lastSeqNo, seqNo);
-    if (applyAllDeletes(deleteQueue)) {
-      seqNo = -seqNo;
-    }
-    return seqNo;
+  long deleteTerms(final Term... terms) throws IOException {
+    return applyDeleteOrUpdate(q -> q.addDelete(terms));
   }
 
-  synchronized long updateDocValues(DocValuesUpdate... updates) throws IOException {
+  long updateDocValues(DocValuesUpdate... updates) throws IOException {
+    return applyDeleteOrUpdate(q -> q.addDocValuesUpdates(updates));
+  }
+
+  private synchronized long applyDeleteOrUpdate(ToLongFunction<DocumentsWriterDeleteQueue> function) throws IOException {
+    // TODO why is this synchronized?
     final DocumentsWriterDeleteQueue deleteQueue = this.deleteQueue;
-    long seqNo = deleteQueue.addDocValuesUpdates(updates);
+    long seqNo = function.applyAsLong(deleteQueue);
     flushControl.doOnDelete();
     lastSeqNo = Math.max(lastSeqNo, seqNo);
     if (applyAllDeletes(deleteQueue)) {
@@ -182,10 +169,6 @@ final class DocumentsWriter implements Closeable, Accountable {
     return seqNo;
   }
   
-  DocumentsWriterDeleteQueue currentDeleteSession() {
-    return deleteQueue;
-  }
-
   /** If buffered deletes are using too much heap, resolve them and write disk and return true. */
   private boolean applyAllDeletes(DocumentsWriterDeleteQueue deleteQueue) throws IOException {
     if (flushControl.getAndResetApplyAllDeletes()) {
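
For clarity, here is a minimal, self-contained sketch of the ToLongFunction
consolidation applied in DocumentsWriter above; DeleteQueue and DeleteFrontend
are hypothetical stand-ins, not the Lucene classes:

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.ToLongFunction;

    final class DeleteQueue {
      private final AtomicLong seq = new AtomicLong();
      long addTerms(String... terms)     { return seq.incrementAndGet(); }
      long addQueries(String... queries) { return seq.incrementAndGet(); }
    }

    final class DeleteFrontend {
      private final DeleteQueue queue = new DeleteQueue();
      private long lastSeqNo;

      long deleteTerms(String... terms)     { return apply(q -> q.addTerms(terms)); }
      long deleteQueries(String... queries) { return apply(q -> q.addQueries(queries)); }

      // One synchronized method now carries the bookkeeping that used to be
      // duplicated across deleteTerms, deleteQueries and updateDocValues.
      private synchronized long apply(ToLongFunction<DeleteQueue> op) {
        long seqNo = op.applyAsLong(queue);
        lastSeqNo = Math.max(lastSeqNo, seqNo);
        return seqNo;
      }
    }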

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
index fc268df..586afa7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java
@@ -297,7 +297,7 @@ class FrozenBufferedUpdates {
 
         // Must open while holding IW lock so that e.g. segments are not merged
         // away, dropped from 100% deletions, etc., before we can open the readers
-        segStates = writer.bufferedUpdatesStream.openSegmentStates(writer.readerPool, infos, seenSegments, delGen());
+        segStates = writer.bufferedUpdatesStream.openSegmentStates(infos, seenSegments, delGen());
 
         if (segStates.length == 0) {
 
@@ -328,8 +328,8 @@ class FrozenBufferedUpdates {
         success.set(true);
       }
 
-      // Since we jus resolved some more deletes/updates, now is a good time to write them:
-      writer.readerPool.writeSomeDocValuesUpdates();
+      // Since we just resolved some more deletes/updates, now is a good time to write them:
+      writer.writeSomeDocValuesUpdates();
 
       // It's OK to add this here, even if the while loop retries, because delCount only includes newly
       // deleted documents, on the segments we didn't already do in previous iterations:
@@ -399,7 +399,7 @@ class FrozenBufferedUpdates {
 
       BufferedUpdatesStream.ApplyDeletesResult result;
       try {
-        result = writer.bufferedUpdatesStream.closeSegmentStates(writer.readerPool, segStates, success);
+        result = writer.bufferedUpdatesStream.closeSegmentStates(segStates, success);
       } finally {
         // Matches the incRef we did above, but we must do the decRef after closing segment states else
         // IFD can't delete still-open files

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index d6237e1..974f6c5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -25,7 +25,6 @@ import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
@@ -324,24 +323,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
   final AtomicInteger flushDeletesCount = new AtomicInteger();
 
-  final ReaderPool readerPool = new ReaderPool();
+  private final ReaderPool readerPool;
   final BufferedUpdatesStream bufferedUpdatesStream;
 
   /** Counts how many merges have completed; this is used by {@link FrozenBufferedUpdates#apply}
   *  to handle applying deletes/updates concurrently with merges completing. */
   final AtomicLong mergeFinishedGen = new AtomicLong();
 
-  // This is a "write once" variable (like the organic dye
-  // on a DVD-R that may or may not be heated by a laser and
-  // then cooled to permanently record the event): it's
-  // false, until getReader() is called for the first time,
-  // at which point it's switched to true and never changes
-  // back to false.  Once this is true, we hold open and
-  // reuse SegmentReader instances internally for applying
-  // deletes, doing merges, and reopening near real-time
-  // readers.
-  private volatile boolean poolReaders;
-
   // The instance that was passed to the constructor. It is saved only in order
   // to allow users to query an IndexWriter settings.
   private final LiveIndexWriterConfig config;
@@ -434,7 +422,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // Do this up front before flushing so that the readers
     // obtained during this flush are pooled, the first time
     // this method is called:
-    poolReaders = true;
+    readerPool.enableReaderPooling();
     DirectoryReader r = null;
     doBeforeFlush();
     boolean anyChanges = false;
@@ -477,11 +465,15 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
             // TODO: we could instead just clone SIS and pull/incref readers in sync'd block, and then do this w/o IW's lock?
             // Must do this sync'd on IW to prevent a merge from completing at the last second and failing to write its DV updates:
-            readerPool.writeAllDocValuesUpdates();
+            if (readerPool.writeAllDocValuesUpdates()) {
+              checkpoint();
+            }
 
             if (writeAllDeletes) {
               // Must move the deletes to disk:
-              readerPool.commit(segmentInfos);
+              if (readerPool.commit(segmentInfos)) {
+                checkpointNoSIS();
+              }
             }
 
             // Prevent segmentInfos from changing while opening the
@@ -536,339 +528,62 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     return docWriter.ramBytesUsed();
   }
 
-  /** Holds shared SegmentReader instances. IndexWriter uses
-   *  SegmentReaders for 1) applying deletes/DV updates, 2) doing
-   *  merges, 3) handing out a real-time reader.  This pool
-   *  reuses instances of the SegmentReaders in all these
-   *  places if it is in "near real-time mode" (getReader()
-   *  has been called on this instance). */
-
-  class ReaderPool implements Closeable {
-    
-    private final Map<SegmentCommitInfo,ReadersAndUpdates> readerMap = new HashMap<>();
-
-    /** Asserts this info still exists in IW's segment infos */
-    public synchronized boolean assertInfoIsLive(SegmentCommitInfo info) {
-      int idx = segmentInfos.indexOf(info);
-      assert idx != -1: "info=" + info + " isn't live";
-      assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
-      return true;
-    }
-
-    public synchronized boolean drop(SegmentCommitInfo info) throws IOException {
-      final ReadersAndUpdates rld = readerMap.get(info);
-      if (rld != null) {
-        assert info == rld.info;
-        readerMap.remove(info);
-        rld.dropReaders();
-        return true;
-      }
-      return false;
-    }
-
-    public synchronized long ramBytesUsed() {
-      long bytes = 0;
-      for (ReadersAndUpdates rld : readerMap.values()) {
-        bytes += rld.ramBytesUsed.get();
-      }
-      return bytes;
-    }
-
-    public synchronized boolean anyPendingDeletes() {
-      for(ReadersAndUpdates rld : readerMap.values()) {
-        if (rld.getPendingDeleteCount() != 0) {
-          return true;
-        }
-      }
-
-      return false;
-    }
-
-    public synchronized void release(ReadersAndUpdates rld) throws IOException {
-      release(rld, true);
-    }
-
-    public synchronized void release(ReadersAndUpdates rld, boolean assertInfoLive) throws IOException {
-
-      // Matches incRef in get:
-      rld.decRef();
-
-      if (rld.refCount() == 0) {
-        // This happens if the segment was just merged away, while a buffered deletes packet was still applying deletes/updates to it.
-        assert readerMap.containsKey(rld.info) == false: "seg=" + rld.info + " has refCount 0 but still unexpectedly exists in the reader pool";
-      } else {
-
-        // Pool still holds a ref:
-        assert rld.refCount() > 0: "refCount=" + rld.refCount() + " reader=" + rld.info;
-
-        if (!poolReaders && rld.refCount() == 1 && readerMap.containsKey(rld.info)) {
-          // This is the last ref to this RLD, and we're not
-          // pooling, so remove it:
-          if (rld.writeLiveDocs(directory)) {
-            // Make sure we only write del docs for a live segment:
-            assert assertInfoLive == false || assertInfoIsLive(rld.info);
-            // Must checkpoint because we just
-            // created new _X_N.del and field updates files;
-            // don't call IW.checkpoint because that also
-            // increments SIS.version, which we do not want to
-            // do here: it was done previously (after we
-            // invoked BDS.applyDeletes), whereas here all we
-            // did was move the state to disk:
-            checkpointNoSIS();
-          }
-          if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) {
-            checkpointNoSIS();
-          }
-          if (rld.getNumDVUpdates() == 0) {
-            rld.dropReaders();
-            readerMap.remove(rld.info);
-          } else {
-            // We are forced to pool this segment until its deletes fully apply (no delGen gaps)
-          }
-        }
-      }
-    }
-    
-    @Override
-    public void close() throws IOException {
-      dropAll(false);
-    }
-
-    void writeAllDocValuesUpdates() throws IOException {
-      assert Thread.holdsLock(IndexWriter.this);
-      Collection<ReadersAndUpdates> copy;
-      synchronized (this) {
-        // this needs to be protected by the reader pool lock otherwise we hit ConcurrentModificationException
-        copy = new HashSet<>(readerMap.values());
-      }
-      boolean any = false;
-      for (ReadersAndUpdates rld : copy) {
-        any |= rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream);
-      }
-      if (any) {
-        checkpoint();
-      }
-    }
-
-    void writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
-      assert Thread.holdsLock(IndexWriter.this);
-      boolean any = false;
-      for (SegmentCommitInfo info : infos) {
-        ReadersAndUpdates rld = get(info, false);
-        if (rld != null) {
-          any |= rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream);
-          rld.setIsMerging();
-        }
-      }
-      if (any) {
-        checkpoint();
-      }
-    }
-
-    private final AtomicBoolean writeDocValuesLock = new AtomicBoolean();
-
-    void writeSomeDocValuesUpdates() throws IOException {
+  final long getReaderPoolRamBytesUsed() {
+    return readerPool.ramBytesUsed();
+  }
 
-      assert Thread.holdsLock(IndexWriter.this) == false;
+  private final AtomicBoolean writeDocValuesLock = new AtomicBoolean();
 
-      if (writeDocValuesLock.compareAndSet(false, true)) {
-        try {
+  void writeSomeDocValuesUpdates() throws IOException {
+    if (writeDocValuesLock.compareAndSet(false, true)) {
+      try {
+        final double ramBufferSizeMB = config.getRAMBufferSizeMB();
+        // If the reader pool is > 50% of our IW buffer, then write the updates:
+        if (ramBufferSizeMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
+          long startNS = System.nanoTime();
+
+          long ramBytesUsed = getReaderPoolRamBytesUsed();
+          if (ramBytesUsed > 0.5 * ramBufferSizeMB * 1024 * 1024) {
+            if (infoStream.isEnabled("BD")) {
+              infoStream.message("BD", String.format(Locale.ROOT, "now write some pending DV updates: %.2f MB used vs IWC Buffer %.2f MB",
+                  ramBytesUsed/1024./1024., ramBufferSizeMB));
+            }
 
-          LiveIndexWriterConfig config = getConfig();
-          double mb = config.getRAMBufferSizeMB();
-          // If the reader pool is > 50% of our IW buffer, then write the updates:
-          if (mb != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
-            long startNS = System.nanoTime();
-            
-            long ramBytesUsed = ramBytesUsed();
-            if (ramBytesUsed > 0.5 * mb * 1024 * 1024) {
-              if (infoStream.isEnabled("BD")) {
-                infoStream.message("BD", String.format(Locale.ROOT, "now write some pending DV updates: %.2f MB used vs IWC Buffer %.2f MB",
-                                                       ramBytesUsed/1024./1024., mb));
-              }
-          
-              // Sort by largest ramBytesUsed:
-              PriorityQueue<ReadersAndUpdates> queue = new PriorityQueue<>(readerMap.size(), (a, b) -> Long.compare(b.ramBytesUsed.get(), a.ramBytesUsed.get()));
-              synchronized (this) {
-                for (ReadersAndUpdates rld : readerMap.values()) {
-                  queue.add(rld);
-                }
+            // Sort by largest ramBytesUsed:
+            PriorityQueue<ReadersAndUpdates> queue = readerPool.getReadersByRam();
+            int count = 0;
+            while (ramBytesUsed > 0.5 * ramBufferSizeMB * 1024 * 1024) {
+              ReadersAndUpdates rld = queue.poll();
+              if (rld == null) {
+                break;
               }
 
-              int count = 0;
-              while (ramBytesUsed > 0.5 * mb * 1024 * 1024) {
-                ReadersAndUpdates rld = queue.poll();
-                if (rld == null) {
-                  break;
-                }
+              // We need to do before/after because not all RAM in this RAU is used by DV updates, and
+              // not all of those bytes can be written here:
+              long bytesUsedBefore = rld.ramBytesUsed.get();
 
-                // We need to do before/after because not all RAM in this RAU is used by DV updates, and
-                // not all of those bytes can be written here:
-                long bytesUsedBefore = rld.ramBytesUsed.get();
-
-                // Only acquire IW lock on each write, since this is a time consuming operation.  This way
-                // other threads get a chance to run in between our writes.
-                synchronized (IndexWriter.this) {
-                  if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) {
-                    checkpointNoSIS();
-                  }
+              // Only acquire IW lock on each write, since this is a time consuming operation.  This way
+              // other threads get a chance to run in between our writes.
+              synchronized (this) {
+                if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) {
+                  checkpointNoSIS();
                 }
-                long bytesUsedAfter = rld.ramBytesUsed.get();
-                ramBytesUsed -= bytesUsedBefore - bytesUsedAfter;
-                count++;
-              }
-
-              if (infoStream.isEnabled("BD")) {
-                infoStream.message("BD", String.format(Locale.ROOT, "done write some DV updates for %d segments: now %.2f MB used vs IWC Buffer %.2f MB; took %.2f sec",
-                                                       count, ramBytesUsed()/1024./1024., mb, ((System.nanoTime() - startNS)/1000000000.)));
               }
+              long bytesUsedAfter = rld.ramBytesUsed.get();
+              ramBytesUsed -= bytesUsedBefore - bytesUsedAfter;
+              count++;
             }
-          }
-        } finally {
-          writeDocValuesLock.set(false);
-        }
-      }
-    }
 
-    /** Remove all our references to readers, and commits
-     *  any pending changes. */
-    synchronized void dropAll(boolean doSave) throws IOException {
-      Throwable priorE = null;
-      final Iterator<Map.Entry<SegmentCommitInfo,ReadersAndUpdates>> it = readerMap.entrySet().iterator();
-      while(it.hasNext()) {
-        final ReadersAndUpdates rld = it.next().getValue();
-        try {
-          if (doSave && rld.writeLiveDocs(directory)) {
-            // Make sure we only write del docs and field updates for a live segment:
-            assert assertInfoIsLive(rld.info);
-            // Must checkpoint because we just
-            // created new _X_N.del and field updates files;
-            // don't call IW.checkpoint because that also
-            // increments SIS.version, which we do not want to
-            // do here: it was done previously (after we
-            // invoked BDS.applyDeletes), whereas here all we
-            // did was move the state to disk:
-            checkpointNoSIS();
-          }
-        } catch (Throwable t) {
-          priorE = IOUtils.useOrSuppress(priorE, t);
-          if (doSave) {
-            throw t;
-          }
-        }
-
-        // Important to remove as-we-go, not with .clear()
-        // in the end, in case we hit an exception;
-        // otherwise we could over-decref if close() is
-        // called again:
-        it.remove();
-
-        // NOTE: it is allowed that these decRefs do not
-        // actually close the SRs; this happens when a
-        // near real-time reader is kept open after the
-        // IndexWriter instance is closed:
-        try {
-          rld.dropReaders();
-        } catch (Throwable t) {
-          priorE = IOUtils.useOrSuppress(priorE, t);
-          if (doSave) {
-            throw t;
-          }
-        }
-      }
-      assert readerMap.size() == 0;
-      if (priorE != null) {
-        throw IOUtils.rethrowAlways(priorE);
-      }
-    }
-
-    /**
-     * Commit live docs changes for the segment readers for
-     * the provided infos.
-     *
-     * @throws IOException If there is a low-level I/O error
-     */
-    public synchronized void commit(SegmentInfos infos) throws IOException {
-      for (SegmentCommitInfo info : infos) {
-        final ReadersAndUpdates rld = readerMap.get(info);
-        if (rld != null) {
-          assert rld.info == info;
-          boolean changed = rld.writeLiveDocs(directory);
-          changed |= rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream);
-
-          if (changed) {
-            // Make sure we only write del docs for a live segment:
-            assert assertInfoIsLive(info);
-
-            // Must checkpoint because we just
-            // created new _X_N.del and field updates files;
-            // don't call IW.checkpoint because that also
-            // increments SIS.version, which we do not want to
-            // do here: it was done previously (after we
-            // invoked BDS.applyDeletes), whereas here all we
-            // did was move the state to disk:
-            checkpointNoSIS();
+            if (infoStream.isEnabled("BD")) {
+              infoStream.message("BD", String.format(Locale.ROOT, "done write some DV updates for %d segments: now %.2f MB used vs IWC Buffer %.2f MB; took %.2f sec",
+                  count, getReaderPoolRamBytesUsed()/1024./1024., ramBufferSizeMB, ((System.nanoTime() - startNS)/1000000000.)));
+            }
           }
-
         }
+      } finally {
+        writeDocValuesLock.set(false);
       }
     }
-
-    public synchronized boolean anyChanges() {
-      for (ReadersAndUpdates rld : readerMap.values()) {
-        // NOTE: we don't check for pending deletes because deletes carry over in RAM to NRT readers
-        if (rld.getNumDVUpdates() != 0) {
-          return true;
-        }
-      }
-
-      return false;
-    }
-
-    /**
-     * Obtain a ReadersAndLiveDocs instance from the
-     * readerPool.  If create is true, you must later call
-     * {@link #release(ReadersAndUpdates)}.
-     */
-    public synchronized ReadersAndUpdates get(SegmentCommitInfo info, boolean create) {
-
-      // Make sure no new readers can be opened if another thread just closed us:
-      ensureOpen(false);
-
-      assert info.info.dir == directoryOrig: "info.dir=" + info.info.dir + " vs " + directoryOrig;
-
-      ReadersAndUpdates rld = readerMap.get(info);
-      if (rld == null) {
-        if (create == false) {
-          return null;
-        }
-        rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
-        // Steal initial reference:
-        readerMap.put(info, rld);
-      } else {
-        assert rld.info == info: "rld.info=" + rld.info + " info=" + info + " isLive?=" + assertInfoIsLive(rld.info) + " vs " + assertInfoIsLive(info);
-      }
-
-      if (create) {
-        // Return ref to caller:
-        rld.incRef();
-      }
-
-      assert noDups();
-
-      return rld;
-    }
-
-    // Make sure that every segment appears only once in the
-    // pool:
-    private boolean noDups() {
-      Set<String> seen = new HashSet<>();
-      for(SegmentCommitInfo info : readerMap.keySet()) {
-        assert !seen.contains(info.info.name);
-        seen.add(info.info.name);
-      }
-      return true;
-    }
   }
 
   /**
@@ -880,7 +595,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     ensureOpen(false);
     int delCount = info.getDelCount();
 
-    final ReadersAndUpdates rld = readerPool.get(info, false);
+    final ReadersAndUpdates rld = getPooledInstance(info, false);
     if (rld != null) {
       delCount += rld.getPendingDeleteCount();
     }
@@ -965,7 +680,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       codec = config.getCodec();
 
       bufferedUpdatesStream = new BufferedUpdatesStream(this);
-      poolReaders = config.getReaderPooling();
 
       OpenMode mode = config.getOpenMode();
       boolean create;
@@ -1021,7 +735,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         }
         
         segmentInfos = sis;
-
         rollbackSegments = segmentInfos.createBackupSegmentInfos();
 
         // Record that we have a change (zero out all
@@ -1066,11 +779,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         }
 
         rollbackSegments = lastCommit.createBackupSegmentInfos();
-
-        if (infoStream.isEnabled("IW")) {
-          infoStream.message("IW", "init from reader " + reader);
-          messageState();
-        }
       } else {
         // Init from either the latest commit point, or an explicit prior commit point:
 
@@ -1118,7 +826,11 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       config.getFlushPolicy().init(config);
       docWriter = new DocumentsWriter(this, config, directoryOrig, directory);
       eventQueue = docWriter.eventQueue();
-
+      readerPool = new ReaderPool(directory, directoryOrig, segmentInfos, globalFieldNumberMap,
+          bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader);
+      if (config.getReaderPooling()) {
+        readerPool.enableReaderPooling();
+      }
       // Default deleter (for backwards compatibility) is
       // KeepOnlyLastCommitDeleter:
 
@@ -1142,26 +854,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       }
 
       if (reader != null) {
-        // Pre-enroll all segment readers into the reader pool; this is necessary so
-        // any in-memory NRT live docs are correctly carried over, and so NRT readers
-        // pulled from this IW share the same segment reader:
-        List<LeafReaderContext> leaves = reader.leaves();
-        assert segmentInfos.size() == leaves.size();
-
-        for (int i=0;i<leaves.size();i++) {
-          LeafReaderContext leaf = leaves.get(i);
-          SegmentReader segReader = (SegmentReader) leaf.reader();
-          SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(), segReader.numDocs());
-          readerPool.readerMap.put(newReader.getSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), newReader, newPendingDeletes(newReader, newReader.getSegmentInfo())));
-        }
-
         // We always assume we are carrying over incoming changes when opening from reader:
         segmentInfos.changed();
         changed();
       }
 
       if (infoStream.isEnabled("IW")) {
-        infoStream.message("IW", "init: create=" + create);
+        infoStream.message("IW", "init: create=" + create + " reader=" + reader);
         messageState();
       }
 
@@ -1638,7 +1337,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // cost:
 
     if (segmentInfos.indexOf(info) != -1) {
-      ReadersAndUpdates rld = readerPool.get(info, false);
+      ReadersAndUpdates rld = getPooledInstance(info, false);
       if (rld != null) {
         synchronized(bufferedUpdatesStream) {
           if (rld.delete(docID)) {
@@ -2478,8 +2177,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
             notifyAll();
           }
         }
-        // Don't bother saving any changes in our segmentInfos
-        readerPool.dropAll(false);
         final int totalMaxDoc = segmentInfos.totalMaxDoc();
         // Keep the same segmentInfos instance but replace all
         // of its SegmentInfo instances so IFD below will remove
@@ -2505,7 +2202,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         }
 
         lastCommitChangeCount = changeCount.get();
-
+        // Don't bother saving any changes in our segmentInfos
+        readerPool.close();
         // Must set closed while inside same sync block where we call deleter.refresh, else concurrent threads may try to sneak a flush in,
         // after we leave this sync block and before we enter the sync block in the finally clause below that sets closed:
         closed = true;
@@ -2630,7 +2328,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
                * We will remove the files incrementally as we go...
                */
               // Don't bother saving any changes in our segmentInfos
-              readerPool.dropAll(false);
+              readerPool.dropAll();
               // Mark that the index has changed
               changeCount.incrementAndGet();
               segmentInfos.changed();
@@ -2810,7 +2508,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       if (packet != null && packet.any() && sortMap != null) {
         // TODO: not great we do this heavyish op while holding IW's monitor lock,
         // but it only applies if you are using sorted indices and updating doc values:
-        ReadersAndUpdates rld = readerPool.get(newSegment, true);
+        ReadersAndUpdates rld = getPooledInstance(newSegment, true);
         rld.sortMap = sortMap;
         // DON't release this ReadersAndUpdates we need to stick with that sortMap
       }
@@ -2828,13 +2526,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       if (hasInitialSoftDeleted || isFullyHardDeleted){
        // this operation is only really executed if needed; if soft-deletes are not configured, it is only
        // executed if we deleted all docs in this newly flushed segment.
-        ReadersAndUpdates rld = readerPool.get(newSegment, true);
+        ReadersAndUpdates rld = getPooledInstance(newSegment, true);
         try {
           if (isFullyDeleted(rld)) {
             dropDeletedSegment(newSegment);
           }
         } finally {
-          readerPool.release(rld);
+          release(rld);
         }
       }
 
@@ -3381,7 +3079,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
             applyAllDeletesAndUpdates();
             synchronized(this) {
 
-              readerPool.commit(segmentInfos);
+              if (readerPool.commit(segmentInfos)) {
+                checkpointNoSIS();
+              }
 
               if (changeCount.get() != lastCommitChangeCount) {
                 // There are changes to commit, so we will write a new segments_N in startCommit.
@@ -3831,7 +3531,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     long minGen = Long.MAX_VALUE;
 
     // Lazy init (only when we find a delete or update to carry over):
-    final ReadersAndUpdates mergedDeletesAndUpdates = readerPool.get(merge.info, true);
+    final ReadersAndUpdates mergedDeletesAndUpdates = getPooledInstance(merge.info, true);
     
     // field -> delGen -> dv field updates
     Map<String,Map<Long,DocValuesFieldUpdates>> mappedDVUpdates = new HashMap<>();
@@ -3844,7 +3544,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       minGen = Math.min(info.getBufferedDeletesGen(), minGen);
       final int maxDoc = info.info.maxDoc();
       final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
-      final ReadersAndUpdates rld = readerPool.get(info, false);
+      final ReadersAndUpdates rld = getPooledInstance(info, false);
       // We hold a ref, from when we opened the readers during mergeInit, so it better still be in the pool:
       assert rld != null: "seg=" + info.info.name;
       final Bits currentLiveDocs = rld.getLiveDocs();
@@ -4055,7 +3755,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
         // Pass false for assertInfoLive because the merged
         // segment is not yet live (only below do we commit it
         // to the segmentInfos):
-        readerPool.release(mergedUpdates, false);
+        release(mergedUpdates, false);
         success = true;
       } finally {
         if (!success) {
@@ -4350,7 +4050,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     // Must move the pending doc values updates to disk now, else the newly merged segment will not see them:
     // TODO: we could fix merging to pull the merged DV iterator so we don't have to move these updates to disk first, i.e. just carry them
     // in memory:
-    readerPool.writeDocValuesUpdatesForMerge(merge.segments);
+    if (readerPool.writeDocValuesUpdatesForMerge(merge.segments)) {
+      checkpoint();
+    }
     
     // Bind a new segment name here so even with
     // ConcurrentMergePolicy we keep deterministic segment
@@ -4419,7 +4121,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     final boolean drop = suppressExceptions == false;
     try (Closeable finalizer = merge::mergeFinished) {
       IOUtils.applyToAll(merge.readers, sr -> {
-        final ReadersAndUpdates rld = readerPool.get(sr.getSegmentInfo(), false);
+        final ReadersAndUpdates rld = getPooledInstance(sr.getSegmentInfo(), false);
         // We still hold a ref so it should not have been removed:
         assert rld != null;
         if (drop) {
@@ -4428,7 +4130,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
           rld.dropMergingUpdates();
         }
         rld.release(sr);
-        readerPool.release(rld);
+        release(rld);
         if (drop) {
           readerPool.drop(rld.info);
         }
@@ -4468,7 +4170,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 
         // Hold onto the "live" reader; we will use this to
         // commit merged deletes
-        final ReadersAndUpdates rld = readerPool.get(info, true);
+        final ReadersAndUpdates rld = getPooledInstance(info, true);
         rld.setIsMerging();
 
         SegmentReader reader = rld.getReaderForMerge(context);
@@ -4644,15 +4346,15 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
       }
 
       final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
-      if (poolReaders && mergedSegmentWarmer != null) {
-        final ReadersAndUpdates rld = readerPool.get(merge.info, true);
+      if (readerPool.isReaderPoolingEnabled() && mergedSegmentWarmer != null) {
+        final ReadersAndUpdates rld = getPooledInstance(merge.info, true);
         final SegmentReader sr = rld.getReader(IOContext.READ);
         try {
           mergedSegmentWarmer.warm(sr);
         } finally {
           synchronized(this) {
             rld.release(sr);
-            readerPool.release(rld);
+            release(rld);
           }
         }
       }
@@ -4998,7 +4700,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     boolean isCurrent = infos.getVersion() == segmentInfos.getVersion()
       && docWriter.anyChanges() == false
       && bufferedUpdatesStream.any() == false
-      && readerPool.anyChanges() == false;
+      && readerPool.anyDocValuesChanges() == false;
     if (infoStream.isEnabled("IW")) {
       if (isCurrent == false) {
         infoStream.message("IW", "nrtIsCurrent: infoVersion matches: " + (infos.getVersion() == segmentInfos.getVersion()) + "; DW changes: " + docWriter.anyChanges() + "; BD changes: "+ bufferedUpdatesStream.any());
@@ -5222,16 +4924,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     return count;
   }
 
-  private PendingDeletes newPendingDeletes(SegmentCommitInfo info) {
-    String softDeletesField = config.getSoftDeletesField();
-    return softDeletesField == null ? new PendingDeletes(info) : new PendingSoftDeletes(softDeletesField, info);
-  }
-
-  private PendingDeletes newPendingDeletes(SegmentReader reader, SegmentCommitInfo info) {
-    String softDeletesField = config.getSoftDeletesField();
-    return softDeletesField == null ? new PendingDeletes(reader, info) : new PendingSoftDeletes(softDeletesField, reader, info);
-  }
-
   final boolean isFullyDeleted(ReadersAndUpdates readersAndUpdates) throws IOException {
     if (readersAndUpdates.isFullyDeleted()) {
       assert Thread.holdsLock(this);
@@ -5240,7 +4932,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     return false;
   }
 
-
   /**
    * Returns the number of deletes a merge would claim back if the given segment is merged.
    * @see MergePolicy#numDeletesToMerge(SegmentCommitInfo, int, org.apache.lucene.util.IOSupplier)
@@ -5248,8 +4939,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
    * @lucene.experimental
    */
   public final int numDeletesToMerge(SegmentCommitInfo info) throws IOException {
+    ensureOpen(false);
     MergePolicy mergePolicy = config.getMergePolicy();
-    final ReadersAndUpdates rld = readerPool.get(info, false);
+    final ReadersAndUpdates rld = getPooledInstance(info, false);
     int numDeletesToMerge;
     if (rld != null) {
       numDeletesToMerge = rld.numDeletesToMerge(mergePolicy);
@@ -5260,6 +4952,23 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
     assert numDeletesToMerge <= info.info.maxDoc() :
         "numDeletesToMerge: " + numDeletesToMerge + " > maxDoc: " + info.info.maxDoc();
     return numDeletesToMerge;
+  }
 
+  void release(ReadersAndUpdates readersAndUpdates) throws IOException {
+    release(readersAndUpdates, true);
+  }
+
+  private void release(ReadersAndUpdates readersAndUpdates, boolean assertLiveInfo) throws IOException {
+    assert Thread.holdsLock(this);
+    if (readerPool.release(readersAndUpdates, assertLiveInfo)) {
+      // if we write anything here we have to hold the lock, otherwise IFD will delete files underneath us
+      assert Thread.holdsLock(this);
+      checkpointNoSIS();
+    }
+  }
+
+  ReadersAndUpdates getPooledInstance(SegmentCommitInfo info, boolean create) {
+    ensureOpen(false);
+    return readerPool.get(info, create);
   }
 }
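
The acquire/release contract for the pool now sits behind two small
IndexWriter helpers; the flush-time hunk above already shows the typical
usage, repeated here as a standalone sketch:

    // create=true takes a reference that the caller must give back; release()
    // checkpoints internally if the pool wrote any files during the release.
    ReadersAndUpdates rld = getPooledInstance(newSegment, true);
    try {
      if (isFullyDeleted(rld)) {
        dropDeletedSegment(newSegment);
      }
    } finally {
      release(rld);
    }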

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
new file mode 100644
index 0000000..cecc310
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.LongSupplier;
+
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.InfoStream;
+
+/** Holds shared SegmentReader instances. IndexWriter uses
+ *  SegmentReaders for 1) applying deletes/DV updates, 2) doing
+ *  merges, 3) handing out a real-time reader.  This pool
+ *  reuses instances of the SegmentReaders in all these
+ *  places if it is in "near real-time mode" (getReader()
+ *  has been called on this instance). */
+final class ReaderPool implements Closeable {
+
+  private final Map<SegmentCommitInfo,ReadersAndUpdates> readerMap = new HashMap<>();
+  private final Directory directory;
+  private final Directory originalDirectory;
+  private final FieldInfos.FieldNumbers fieldNumbers;
+  private final LongSupplier completedDelGenSupplier;
+  private final InfoStream infoStream;
+  private final SegmentInfos segmentInfos;
+  private final String softDeletesField;
+  // This is a "write once" variable (like the organic dye
+  // on a DVD-R that may or may not be heated by a laser and
+  // then cooled to permanently record the event): it's
+  // false, by default until {@link #enableReaderPooling()}
+  // is called for the first time,
+  // at which point it's switched to true and never changes
+  // back to false.  Once this is true, we hold open and
+  // reuse SegmentReader instances internally for applying
+  // deletes, doing merges, and reopening near real-time
+  // readers.
+  // in practice this should be called once the readers are likely
+  // to be needed and reused, i.e. when IndexWriter#getReader is called.
+  private volatile boolean poolReaders;
+  private final AtomicBoolean closed = new AtomicBoolean(false);
+
+  ReaderPool(Directory directory, Directory originalDirectory, SegmentInfos segmentInfos,
+             FieldInfos.FieldNumbers fieldNumbers, LongSupplier completedDelGenSupplier, InfoStream infoStream,
+             String softDeletesField, StandardDirectoryReader reader) throws IOException {
+    this.directory = directory;
+    this.originalDirectory = originalDirectory;
+    this.segmentInfos = segmentInfos;
+    this.fieldNumbers = fieldNumbers;
+    this.completedDelGenSupplier = completedDelGenSupplier;
+    this.infoStream = infoStream;
+    this.softDeletesField = softDeletesField;
+    if (reader != null) {
+      // Pre-enroll all segment readers into the reader pool; this is necessary so
+      // any in-memory NRT live docs are correctly carried over, and so NRT readers
+      // pulled from this IW share the same segment reader:
+      List<LeafReaderContext> leaves = reader.leaves();
+      assert segmentInfos.size() == leaves.size();
+      for (int i=0;i<leaves.size();i++) {
+        LeafReaderContext leaf = leaves.get(i);
+        SegmentReader segReader = (SegmentReader) leaf.reader();
+        SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(),
+            segReader.numDocs());
+        readerMap.put(newReader.getSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(),
+            newReader, newPendingDeletes(newReader, newReader.getSegmentInfo())));
+      }
+    }
+  }
+
+  /** Asserts this info still exists in IW's segment infos */
+  synchronized boolean assertInfoIsLive(SegmentCommitInfo info) {
+    int idx = segmentInfos.indexOf(info);
+    assert idx != -1: "info=" + info + " isn't live";
+    assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
+    return true;
+  }
+
+  /**
+   * Drops reader for the given {@link SegmentCommitInfo} if it's pooled
+   * @return <code>true</code> if a reader is pooled
+   */
+  synchronized boolean drop(SegmentCommitInfo info) throws IOException {
+    final ReadersAndUpdates rld = readerMap.get(info);
+    if (rld != null) {
+      assert info == rld.info;
+      readerMap.remove(info);
+      rld.dropReaders();
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Returns the sum of the RAM, in bytes, used by all the buffered readers and updates
+   */
+  synchronized long ramBytesUsed() {
+    long bytes = 0;
+    for (ReadersAndUpdates rld : readerMap.values()) {
+      bytes += rld.ramBytesUsed.get();
+    }
+    return bytes;
+  }
+
+  /**
+   * Returns <code>true</code> iff any of the buffered readers and updates has at least one pending delete
+   */
+  synchronized boolean anyPendingDeletes() {
+    for(ReadersAndUpdates rld : readerMap.values()) {
+      if (rld.getPendingDeleteCount() != 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Enables reader pooling for this pool. This should be called once the readers in this pool are shared with an
+   * outside resource like an NRT reader. Once reader pooling is enabled, a {@link ReadersAndUpdates} will be kept around
+   * in the reader pool on calling {@link #release(ReadersAndUpdates, boolean)} until the segment gets dropped via calls
+   * to {@link #drop(SegmentCommitInfo)} or {@link #dropAll()} or {@link #close()}.
+   * Reader pooling is disabled upon construction and cannot be disabled once it has been enabled.
+   */
+  void enableReaderPooling() {
+    poolReaders = true;
+  }
+
+  boolean isReaderPoolingEnabled() {
+    return poolReaders;
+  }
+
+  /**
+   * Releases the {@link ReadersAndUpdates}. This should only be called if {@link #get(SegmentCommitInfo, boolean)}
+   * was called with the create parameter set to <code>true</code>.
+   * @return <code>true</code> if any files were written by this release call.
+   */
+  synchronized boolean release(ReadersAndUpdates rld, boolean assertInfoLive) throws IOException {
+    boolean changed = false;
+    // Matches incRef in get:
+    rld.decRef();
+
+    if (rld.refCount() == 0) {
+      // This happens if the segment was just merged away,
+      // while a buffered deletes packet was still applying deletes/updates to it.
+      assert readerMap.containsKey(rld.info) == false: "seg=" + rld.info
+          + " has refCount 0 but still unexpectedly exists in the reader pool";
+    } else {
+
+      // Pool still holds a ref:
+      assert rld.refCount() > 0: "refCount=" + rld.refCount() + " reader=" + rld.info;
+
+      if (poolReaders == false && rld.refCount() == 1 && readerMap.containsKey(rld.info)) {
+        // This is the last ref to this RLD, and we're not
+        // pooling, so remove it:
+        if (rld.writeLiveDocs(directory)) {
+          // Make sure we only write del docs for a live segment:
+          assert assertInfoLive == false || assertInfoIsLive(rld.info);
+          // Must checkpoint because we just
+          // created new _X_N.del and field updates files;
+          // don't call IW.checkpoint because that also
+          // increments SIS.version, which we do not want to
+          // do here: it was done previously (after we
+          // invoked BDS.applyDeletes), whereas here all we
+          // did was move the state to disk:
+          changed = true;
+        }
+        if (rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream)) {
+          changed = true;
+        }
+        if (rld.getNumDVUpdates() == 0) {
+          rld.dropReaders();
+          readerMap.remove(rld.info);
+        } else {
+          // We are forced to pool this segment until its deletes fully apply (no delGen gaps)
+        }
+      }
+    }
+    return changed;
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (closed.compareAndSet(false, true)) {
+      dropAll();
+    }
+  }
+
+  /**
+   * Writes all doc values updates to disk if there are any.
+   * @return <code>true</code> iff any files where written
+   */
+  boolean writeAllDocValuesUpdates() throws IOException {
+    Collection<ReadersAndUpdates> copy;
+    synchronized (this) {
+      // this needs to be protected by the reader pool lock otherwise we hit ConcurrentModificationException
+      copy = new HashSet<>(readerMap.values());
+    }
+    boolean any = false;
+    for (ReadersAndUpdates rld : copy) {
+      any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
+    }
+    return any;
+  }
+
+  /**
+   * Writes doc values updates of the given segments to disk if there are any, and marks those readers as merging.
+   * @return <code>true</code> iff any files were written
+   */
+  boolean writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
+    boolean any = false;
+    for (SegmentCommitInfo info : infos) {
+      ReadersAndUpdates rld = get(info, false);
+      if (rld != null) {
+        any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
+        rld.setIsMerging();
+      }
+    }
+    return any;
+  }
+
+  PriorityQueue<ReadersAndUpdates> getReadersByRam() {
+    // Sort by largest ramBytesUsed:
+    PriorityQueue<ReadersAndUpdates> queue = new PriorityQueue<>(readerMap.size(),
+        (a, b) -> Long.compare(b.ramBytesUsed.get(), a.ramBytesUsed.get()));
+    synchronized (this) {
+      for (ReadersAndUpdates rld : readerMap.values()) {
+        queue.add(rld);
+      }
+    }
+    return queue;
+  }
+
+  /** Removes all our references to readers and drops them,
+   *  without writing any pending changes. */
+  synchronized void dropAll() throws IOException {
+    Throwable priorE = null;
+    final Iterator<Map.Entry<SegmentCommitInfo,ReadersAndUpdates>> it = readerMap.entrySet().iterator();
+    while(it.hasNext()) {
+      final ReadersAndUpdates rld = it.next().getValue();
+
+      // Important to remove as-we-go, not with .clear()
+      // in the end, in case we hit an exception;
+      // otherwise we could over-decref if close() is
+      // called again:
+      it.remove();
+
+      // NOTE: it is allowed that these decRefs do not
+      // actually close the SRs; this happens when a
+      // near real-time reader is kept open after the
+      // IndexWriter instance is closed:
+      try {
+        rld.dropReaders();
+      } catch (Throwable t) {
+        priorE = IOUtils.useOrSuppress(priorE, t);
+      }
+    }
+    assert readerMap.size() == 0;
+    if (priorE != null) {
+      throw IOUtils.rethrowAlways(priorE);
+    }
+  }
+
+  /**
+   * Commits pending live docs and doc values updates for the
+   * segment readers of the provided infos.
+   *
+   * @throws IOException If there is a low-level I/O error
+   */
+  synchronized boolean commit(SegmentInfos infos) throws IOException {
+    boolean atLeastOneChange = false;
+    for (SegmentCommitInfo info : infos) {
+      final ReadersAndUpdates rld = readerMap.get(info);
+      if (rld != null) {
+        assert rld.info == info;
+        boolean changed = rld.writeLiveDocs(directory);
+        changed |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
+
+        if (changed) {
+          // Make sure we only write del docs for a live segment:
+          assert assertInfoIsLive(info);
+
+          // Must checkpoint because we just
+          // created new _X_N.del and field updates files;
+          // don't call IW.checkpoint because that also
+          // increments SIS.version, which we do not want to
+          // do here: it was done previously (after we
+          // invoked BDS.applyDeletes), whereas here all we
+          // did was move the state to disk:
+          atLeastOneChange = true;
+        }
+      }
+    }
+    return atLeastOneChange;
+  }
+
+  /**
+   * Returns <code>true</code> iff there are any buffered doc values updates.
+   * @see #anyPendingDeletes()
+   */
+  synchronized boolean anyDocValuesChanges() {
+    for (ReadersAndUpdates rld : readerMap.values()) {
+      // NOTE: we don't check for pending deletes because deletes carry over in RAM to NRT readers
+      if (rld.getNumDVUpdates() != 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Obtain a {@link ReadersAndUpdates} instance from the
+   * pool.  If <code>create</code> is true, you must later call
+   * {@link #release(ReadersAndUpdates, boolean)}.
+   */
+  synchronized ReadersAndUpdates get(SegmentCommitInfo info, boolean create) {
+    assert info.info.dir == originalDirectory: "info.dir=" + info.info.dir + " vs " + originalDirectory;
+    if (closed.get()) {
+      assert readerMap.isEmpty() : "Reader map is not empty: " + readerMap;
+      throw new AlreadyClosedException("ReaderPool is already closed");
+    }
+
+    ReadersAndUpdates rld = readerMap.get(info);
+    if (rld == null) {
+      if (create == false) {
+        return null;
+      }
+      rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
+      // Steal initial reference:
+      readerMap.put(info, rld);
+    } else {
+      assert rld.info == info: "rld.info=" + rld.info + " info=" + info + " isLive?=" + assertInfoIsLive(rld.info)
+          + " vs " + assertInfoIsLive(info);
+    }
+
+    if (create) {
+      // Return ref to caller:
+      rld.incRef();
+    }
+
+    assert noDups();
+
+    return rld;
+  }
+
+  private PendingDeletes newPendingDeletes(SegmentCommitInfo info) {
+    return softDeletesField == null ? new PendingDeletes(info) : new PendingSoftDeletes(softDeletesField, info);
+  }
+
+  private PendingDeletes newPendingDeletes(SegmentReader reader, SegmentCommitInfo info) {
+    return softDeletesField == null ? new PendingDeletes(reader, info) :
+        new PendingSoftDeletes(softDeletesField, reader, info);
+  }
+
+  // Make sure that every segment appears only once in the
+  // pool:
+  private boolean noDups() {
+    Set<String> seen = new HashSet<>();
+    for(SegmentCommitInfo info : readerMap.keySet()) {
+      assert !seen.contains(info.info.name);
+      seen.add(info.info.name);
+    }
+    return true;
+  }
+}
\ No newline at end of file
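
For readers skimming the diff, here is how a caller is expected to use the new pool, distilled from the javadocs and tests in this change. This is an illustrative sketch only, not part of the commit: the class and method names are made up, it assumes it lives in org.apache.lucene.index (ReaderPool and its collaborators are package-private), and real error handling is omitted.

    package org.apache.lucene.index; // ReaderPool, ReadersAndUpdates etc. are package-private

    import java.io.IOException;
    import org.apache.lucene.store.IOContext;

    final class ReaderPoolUsageSketch {
      static void readSegment(ReaderPool pool, SegmentCommitInfo commitInfo) throws IOException {
        // get(..., true) takes a reference that must be handed back via release():
        ReadersAndUpdates rld = pool.get(commitInfo, true);
        try {
          SegmentReader reader = rld.getReadOnlyClone(IOContext.READ);
          try {
            // ... search or read doc values against the segment reader ...
          } finally {
            rld.release(reader);
          }
        } finally {
          // decrefs the pooled instance; if pooling is disabled and this was the last
          // reference, pending live docs / doc values updates are written to disk and
          // release() returns true. The boolean argument only toggles an internal
          // liveness assertion.
          pool.release(rld, true);
        }
      }
    }

TestReaderPool below exercises the same pattern with pooling both enabled and disabled.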

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
index 76a28e2..dd20910 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java
@@ -750,7 +750,7 @@ final class ReadersAndUpdates {
     liveDocsSharedPending = true;
   }
 
-  synchronized public void setIsMerging() {
+  synchronized void setIsMerging() {
     // This ensures any newly resolved doc value updates while we are merging are
     // saved for re-applying after this segment is done merging:
     if (isMerging == false) {
@@ -759,6 +759,10 @@ final class ReadersAndUpdates {
     }
   }
 
+  synchronized boolean isMerging() {
+    return isMerging;
+  }
+
   /** Returns a reader for merge, with the latest doc values updates and deletions. */
   synchronized SegmentReader getReaderForMerge(IOContext context) throws IOException {
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
index 63c6d95..3b1b72f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
@@ -100,7 +100,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
         // IndexWriter's segmentInfos:
         final SegmentCommitInfo info = infos.info(i);
         assert info.info.dir == dir;
-        final ReadersAndUpdates rld = writer.readerPool.get(info, true);
+        final ReadersAndUpdates rld = writer.getPooledInstance(info, true);
         try {
           final SegmentReader reader = rld.getReadOnlyClone(IOContext.READ);
           if (reader.numDocs() > 0 || writer.getConfig().mergePolicy.keepFullyDeletedSegment(() -> reader)) {
@@ -112,7 +112,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
             segmentInfos.remove(infosUpto);
           }
         } finally {
-          writer.readerPool.release(rld);
+          writer.release(rld);
         }
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index 8bc3f42..fe951f3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -1207,7 +1207,9 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     w = new IndexWriter(d, iwc);
     IndexReader r = DirectoryReader.open(w, false, false);
     assertTrue(w.tryDeleteDocument(r, 1) != -1);
+    assertFalse(((StandardDirectoryReader)r).isCurrent());
     assertTrue(w.tryDeleteDocument(r.leaves().get(0).reader(), 0) != -1);
+    assertFalse(((StandardDirectoryReader)r).isCurrent());
     r.close();
     w.close();
 
@@ -1218,6 +1220,28 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     d.close();
   }
 
+  public void testNRTIsCurrentAfterDelete() throws Exception {
+    Directory d = newDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    IndexWriter w = new IndexWriter(d, iwc);
+    Document doc = new Document();
+    w.addDocument(doc);
+    w.addDocument(doc);
+    w.addDocument(doc);
+    doc.add(new StringField("id", "1", Field.Store.YES));
+    w.addDocument(doc);
+    w.close();
+    iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+    w = new IndexWriter(d, iwc);
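+    // NRT reader opened before the delete, without applying or writing deletes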
+    IndexReader r = DirectoryReader.open(w, false, false);
+    w.deleteDocuments(new Term("id", "1"));
+    IndexReader r2 = DirectoryReader.open(w, true, true);
+    assertFalse(((StandardDirectoryReader)r).isCurrent());
+    assertTrue(((StandardDirectoryReader)r2).isCurrent());
+    IOUtils.close(r, r2, w, d);
+  }
+
   public void testOnlyDeletesTriggersMergeOnClose() throws Exception {
     Directory dir = newDirectory();
     IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/89756929/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java b/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
new file mode 100644
index 0000000..29c5dd3
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestReaderPool.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.Collections;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NullInfoStream;
+
+public class TestReaderPool extends LuceneTestCase {
+
+  public void testDrop() throws IOException {
+    Directory directory = newDirectory();
+    FieldInfos.FieldNumbers fieldNumbers = buildIndex(directory);
+    StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
+    SegmentInfos segmentInfos = reader.segmentInfos.clone();
+
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L, null, null, null);
+    SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
+    ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
+    assertSame(readersAndUpdates, pool.get(commitInfo, false));
+    assertTrue(pool.drop(commitInfo));
+    if (random().nextBoolean()) {
+      assertFalse(pool.drop(commitInfo));
+    }
+    assertNull(pool.get(commitInfo, false));
+    pool.release(readersAndUpdates, random().nextBoolean());
+    IOUtils.close(pool, reader, directory);
+  }
+
+  public void testPoolReaders() throws IOException {
+    Directory directory = newDirectory();
+    FieldInfos.FieldNumbers fieldNumbers = buildIndex(directory);
+    StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
+    SegmentInfos segmentInfos = reader.segmentInfos.clone();
+
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L, null, null, null);
+    SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
+    assertFalse(pool.isReaderPoolingEnabled());
+    pool.release(pool.get(commitInfo, true), random().nextBoolean());
+    assertNull(pool.get(commitInfo, false));
+    // now start pooling
+    pool.enableReaderPooling();
+    assertTrue(pool.isReaderPoolingEnabled());
+    pool.release(pool.get(commitInfo, true), random().nextBoolean());
+    assertNotNull(pool.get(commitInfo, false));
+    assertSame(pool.get(commitInfo, false), pool.get(commitInfo, false));
+    pool.drop(commitInfo);
+    long ramBytesUsed = 0;
+    assertEquals(0, pool.ramBytesUsed());
+    for (SegmentCommitInfo info : segmentInfos) {
+      pool.release(pool.get(info, true), random().nextBoolean());
+      assertEquals(" used: " + ramBytesUsed + " actual: " + pool.ramBytesUsed(), 0, pool.ramBytesUsed());
+      ramBytesUsed = pool.ramBytesUsed();
+      assertSame(pool.get(info, false), pool.get(info, false));
+    }
+    assertNotSame(0, pool.ramBytesUsed());
+    pool.dropAll();
+    for (SegmentCommitInfo info : segmentInfos) {
+      assertNull(pool.get(info, false));
+    }
+    assertEquals(0, pool.ramBytesUsed());
+    IOUtils.close(pool, reader, directory);
+  }
+
+
+  public void testUpdate() throws IOException {
+    Directory directory = newDirectory();
+    FieldInfos.FieldNumbers fieldNumbers = buildIndex(directory);
+    StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
+    SegmentInfos segmentInfos = reader.segmentInfos.clone();
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L,
+        new NullInfoStream(), null, null);
+    int id = random().nextInt(10);
+    if (random().nextBoolean()) {
+      pool.enableReaderPooling();
+    }
+    for (SegmentCommitInfo commitInfo : segmentInfos) {
+      ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
+      SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+      PostingsEnum postings = readOnlyClone.postings(new Term("id", "" + id));
+      boolean expectUpdate = false;
+      int doc = -1;
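+      // if the randomly picked id occurs in this segment, buffer a numeric doc values update for that doc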
+      if (postings != null && postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        NumericDocValuesFieldUpdates number = new NumericDocValuesFieldUpdates(0, "number", commitInfo.info.maxDoc());
+        number.add(doc = postings.docID(), 1000L);
+        number.finish();
+        readersAndUpdates.addDVUpdate(number);
+        expectUpdate = true;
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
+        assertTrue(pool.anyDocValuesChanges());
+      } else {
+        assertFalse(pool.anyDocValuesChanges());
+      }
+      readOnlyClone.close();
+      boolean writtenToDisk;
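+      // randomly exercise one of the pool's write paths; each must report whether the buffered update was written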
+      if (pool.isReaderPoolingEnabled()) {
+        if (random().nextBoolean()) {
+          writtenToDisk = pool.writeAllDocValuesUpdates();
+          assertFalse(readersAndUpdates.isMerging());
+        } else if (random().nextBoolean()) {
+          writtenToDisk = pool.commit(segmentInfos);
+          assertFalse(readersAndUpdates.isMerging());
+        } else {
+          writtenToDisk = pool.writeDocValuesUpdatesForMerge(Collections.singletonList(commitInfo));
+          assertTrue(readersAndUpdates.isMerging());
+        }
+        assertFalse(pool.release(readersAndUpdates, random().nextBoolean()));
+      } else {
+        if (random().nextBoolean()) {
+          writtenToDisk = pool.release(readersAndUpdates, random().nextBoolean());
+          assertFalse(readersAndUpdates.isMerging());
+        } else {
+          writtenToDisk = pool.writeDocValuesUpdatesForMerge(Collections.singletonList(commitInfo));
+          assertTrue(readersAndUpdates.isMerging());
+          assertFalse(pool.release(readersAndUpdates, random().nextBoolean()));
+        }
+      }
+      assertFalse(pool.anyDocValuesChanges());
+      assertEquals(expectUpdate, writtenToDisk);
+      if (expectUpdate) {
+        readersAndUpdates = pool.get(commitInfo, true);
+        SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+        assertNotSame(-1, doc);
+        NumericDocValues number = updatedReader.getNumericDocValues("number");
+        assertEquals(doc, number.advance(doc));
+        assertEquals(1000L, number.longValue());
+        readersAndUpdates.release(updatedReader);
+        assertFalse(pool.release(readersAndUpdates, random().nextBoolean()));
+      }
+    }
+    IOUtils.close(pool, reader, directory);
+  }
+
+  public void testDeletes() throws IOException {
+    Directory directory = newDirectory();
+    FieldInfos.FieldNumbers fieldNumbers = buildIndex(directory);
+    StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
+    SegmentInfos segmentInfos = reader.segmentInfos.clone();
+    ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L,
+        new NullInfoStream(), null, null);
+    int id = random().nextInt(10);
+    if (random().nextBoolean()) {
+      pool.enableReaderPooling();
+    }
+    for (SegmentCommitInfo commitInfo : segmentInfos) {
+      ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
+      SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+      PostingsEnum postings = readOnlyClone.postings(new Term("id", "" + id));
+      boolean expectUpdate = false;
+      int doc = -1;
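+      // if the randomly picked id occurs in this segment, delete that doc through the pooled ReadersAndUpdates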
+      if (postings != null && postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        readersAndUpdates.delete(doc = postings.docID());
+        expectUpdate = true;
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
+        assertTrue(pool.anyPendingDeletes());
+      } else {
+        assertFalse(pool.anyPendingDeletes());
+      }
+      assertFalse(pool.anyDocValuesChanges()); // deletes are not accounted here
+      readOnlyClone.close();
+      boolean writtenToDisk;
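+      // with pooling enabled the live docs are written by commit(); otherwise the final release() writes them as the last reference is returned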
+      if (pool.isReaderPoolingEnabled()) {
+        writtenToDisk = pool.commit(segmentInfos);
+        assertFalse(pool.release(readersAndUpdates, random().nextBoolean()));
+      } else {
+        writtenToDisk = pool.release(readersAndUpdates, random().nextBoolean());
+      }
+      assertFalse(pool.anyDocValuesChanges());
+      assertEquals(expectUpdate, writtenToDisk);
+      if (expectUpdate) {
+        readersAndUpdates = pool.get(commitInfo, true);
+        SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+        assertNotSame(-1, doc);
+        assertFalse(updatedReader.getLiveDocs().get(doc));
+        readersAndUpdates.release(updatedReader);
+        assertFalse(pool.release(readersAndUpdates, random().nextBoolean()));
+      }
+    }
+    IOUtils.close(pool, reader, directory);
+  }
+
+  private FieldInfos.FieldNumbers buildIndex(Directory directory) throws IOException {
+    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig());
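+    // index 10 docs with a string id and a numeric doc values field; random flushes may spread them across several segments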
+    for (int i = 0; i < 10; i++) {
+      Document document = new Document();
+      document.add(new StringField("id", "" + i, Field.Store.YES));
+      document.add(new NumericDocValuesField("number", i));
+      writer.addDocument(document);
+      if (random().nextBoolean()) {
+        writer.flush();
+      }
+    }
+    writer.commit();
+    writer.close();
+    return writer.globalFieldNumberMap;
+  }
+}