Posted to commits@lucene.apache.org by si...@apache.org on 2020/04/10 11:09:31 UTC

[lucene-solr] branch branch_8x updated (17a1204 -> 29c7f07)

This is an automated email from the ASF dual-hosted git repository.

simonw pushed a change to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from 17a1204  Fix typo in SolrRequestHandler's javadocs.
     new e0d8ad8  LUCENE-9298: Improve RAM accounting in BufferedUpdates when deleted doc IDs and terms are cleared (#1389)
     new 29c7f07  LUCENE-9309: Wait for #addIndexes merges when aborting merges (#1418)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 lucene/CHANGES.txt                                 |  4 ++
 .../org/apache/lucene/index/BufferedUpdates.java   | 11 ++--
 .../java/org/apache/lucene/index/IndexWriter.java  | 32 ++++++----
 .../org/apache/lucene/index/SegmentMerger.java     |  2 +-
 .../apache/lucene/index/TestBufferedUpdates.java   | 71 ++++++++++++++++++++++
 5 files changed, 102 insertions(+), 18 deletions(-)
 create mode 100644 lucene/core/src/test/org/apache/lucene/index/TestBufferedUpdates.java


[lucene-solr] 01/02: LUCENE-9298: Improve RAM accounting in BufferedUpdates when deleted doc IDs and terms are cleared (#1389)

Posted by si...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

simonw pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit e0d8ad813c06df030dd87ba8b7f8e598b90de2fb
Author: YuBinglei <br...@qq.com>
AuthorDate: Fri Apr 10 18:30:47 2020 +0800

    LUCENE-9298: Improve RAM accounting in BufferedUpdates when deleted doc IDs and terms are cleared (#1389)
---
 lucene/CHANGES.txt                                 |  2 +
 .../org/apache/lucene/index/BufferedUpdates.java   | 11 ++--
 .../apache/lucene/index/TestBufferedUpdates.java   | 71 ++++++++++++++++++++++
 3 files changed, 80 insertions(+), 4 deletions(-)

diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 0d6dde3..3c1987a 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -147,6 +147,8 @@ Improvements
 * LUCENE-9171: QueryBuilder can now use BoostAttributes on input token streams to selectively
   boost particular terms or synonyms in parsed queries. (Alessandro Benedetti, Alan Woodward)
 
+* LUCENE-9298: Improve RAM accounting in BufferedUpdates when deleted doc IDs and terms are cleared. (Yu Binglei, Simon Willnauer)
+
 Optimizations
 ---------------------
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
index d8317f8..ab826d0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
@@ -80,6 +80,7 @@ class BufferedUpdates implements Accountable {
 
   private final Counter bytesUsed = Counter.newCounter(true);
   final Counter fieldUpdatesBytesUsed = Counter.newCounter(true);
+  private final Counter termsBytesUsed = Counter.newCounter(true);
 
   private final static boolean VERBOSE_DELETES = false;
 
@@ -151,7 +152,7 @@ class BufferedUpdates implements Accountable {
     // is done to respect IndexWriterConfig.setMaxBufferedDeleteTerms.
     numTermDeletes.incrementAndGet();
     if (current == null) {
-      bytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length()));
+      termsBytesUsed.addAndGet(BYTES_PER_DEL_TERM + term.bytes.length + (Character.BYTES * term.field().length()));
     }
   }
  
@@ -176,8 +177,9 @@ class BufferedUpdates implements Accountable {
   }
 
   void clearDeleteTerms() {
-    deleteTerms.clear();
     numTermDeletes.set(0);
+    termsBytesUsed.addAndGet(-termsBytesUsed.get());
+    deleteTerms.clear();
   }
   
   void clear() {
@@ -189,6 +191,7 @@ class BufferedUpdates implements Accountable {
     fieldUpdates.clear();
     bytesUsed.addAndGet(-bytesUsed.get());
     fieldUpdatesBytesUsed.addAndGet(-fieldUpdatesBytesUsed.get());
+    termsBytesUsed.addAndGet(-termsBytesUsed.get());
   }
   
   boolean any() {
@@ -197,11 +200,11 @@ class BufferedUpdates implements Accountable {
 
   @Override
   public long ramBytesUsed() {
-    return bytesUsed.get() + fieldUpdatesBytesUsed.get();
+    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + termsBytesUsed.get();
   }
 
   void clearDeletedDocIds() {
-    deleteDocIDs.clear();
     bytesUsed.addAndGet(-deleteDocIDs.size() * BufferedUpdates.BYTES_PER_DEL_DOCID);
+    deleteDocIDs.clear();
   }
 }
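
Two details in the BufferedUpdates hunks above carry the fix: delete terms get their own counter (termsBytesUsed) so clearDeleteTerms() can release exactly what it accounted for them, and the RAM decrement now happens before the backing collection is cleared, because the amount to release is derived from the collection's size. Below is a minimal sketch of that account-then-clear ordering; SimpleDeleteBuffer and its BYTES_PER_DEL_DOCID value are hypothetical stand-ins built on java.util.concurrent.atomic.AtomicLong, not Lucene's Counter or the real class.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: mirrors the ordering used in the patch, not Lucene code.
class SimpleDeleteBuffer {
  private static final long BYTES_PER_DEL_DOCID = 32; // assumed value for the sketch
  private final List<Integer> deleteDocIDs = new ArrayList<>();
  private final AtomicLong bytesUsed = new AtomicLong();

  void addDocID(int docID) {
    deleteDocIDs.add(docID);
    bytesUsed.addAndGet(BYTES_PER_DEL_DOCID);
  }

  void clearDeletedDocIds() {
    // Subtract before clearing: once the list is emptied, size() is 0 and
    // the accounted RAM would never be released.
    bytesUsed.addAndGet(-deleteDocIDs.size() * BYTES_PER_DEL_DOCID);
    deleteDocIDs.clear();
  }

  long ramBytesUsed() {
    return bytesUsed.get();
  }
}
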
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBufferedUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestBufferedUpdates.java
new file mode 100644
index 0000000..b240af6
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBufferedUpdates.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.stream.IntStream;
+
+/**
+ * Unit test for {@link BufferedUpdates}
+ */
+public class TestBufferedUpdates extends LuceneTestCase {
+  /**
+   * Returns a term that may duplicate a previously generated term.
+   */
+  private static Term mayDuplicate(int bound) {
+    boolean shouldDuplicate = bound > 3 && random().nextBoolean();
+    if (shouldDuplicate) {
+      return new Term("myField", String.valueOf(random().nextInt(bound)));
+    }
+    return new Term("myField", String.valueOf(bound));
+  }
+
+  public void testRamBytesUsed() {
+    BufferedUpdates bu = new BufferedUpdates("seg1");
+    assertEquals(bu.ramBytesUsed(), 0L);
+    assertFalse(bu.any());
+    IntStream.range(0, random().nextInt(atLeast(200))).forEach(id -> {
+      int remainder = random().nextInt(3);
+      if (remainder == 0) {
+        bu.addDocID(id);
+      } else if (remainder == 1) {
+        bu.addQuery(new TermQuery(mayDuplicate(id)), id);
+      } else if (remainder == 2) {
+        bu.addTerm((mayDuplicate(id)), id);
+      }
+    });
+    assertTrue("we have added tons of docIds, terms and queries", bu.any());
+
+    long totalUsed = bu.ramBytesUsed();
+    assertTrue(totalUsed > 0);
+
+    bu.clearDeletedDocIds();
+    assertTrue("only docIds are cleaned, buffer shouldn't be empty", bu.any());
+    assertTrue("docIds are cleaned, ram in used should decrease", totalUsed > bu.ramBytesUsed());
+    totalUsed = bu.ramBytesUsed();
+
+    bu.clearDeleteTerms();
+    assertTrue("only terms and docIds are cleaned, the queries are still in memory", bu.any());
+    assertTrue("terms are cleaned, ram in used should decrease", totalUsed > bu.ramBytesUsed());
+
+    bu.clear();
+    assertFalse(bu.any());
+    assertEquals(bu.ramBytesUsed(), 0L);
+  }
+}


[lucene-solr] 02/02: LUCENE-9309: Wait for #addIndexes merges when aborting merges (#1418)

Posted by si...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

simonw pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 29c7f07de2b5ccb65840ed11987d4b7750f1f070
Author: Simon Willnauer <si...@apache.org>
AuthorDate: Fri Apr 10 12:55:02 2020 +0200

    LUCENE-9309: Wait for #addIndexes merges when aborting merges (#1418)
    
    The SegmentMerger usage in IW#addIndexes(CodecReader...) might make changes
    to the Directory while the IW tries to clean up files on rollback. This
    causes issues like FileNotFoundExceptions when the IndexFileDeleter (IFD) tries
    to remove temp files. This change adds a waiting mechanism to the abortMerges
    method that, in addition to the running merges, also waits for merges running
    in addIndexes(CodecReader...).
---
 lucene/CHANGES.txt                                 |  2 ++
 .../java/org/apache/lucene/index/IndexWriter.java  | 32 +++++++++++++---------
 .../org/apache/lucene/index/SegmentMerger.java     |  2 +-
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 3c1987a..1875ca0 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -57,6 +57,8 @@ Bug Fixes
 * LUCENE-9300: Fix corruption of the new gen field infos when doc values updates are applied on a segment created
   externally and added to the index with IndexWriter#addIndexes(Directory). (Jim Ferenczi, Adrien Grand)
 
+* LUCENE-9309: Wait for #addIndexes merges when aborting merges. (Simon Willnauer)
+
 Other
 ---------------------
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
index 23637a4..d9cf673 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -371,10 +371,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
         }
       }
     }
-
-    int availablePermits() {
-      return permits.availablePermits();
-    }
   }
 
   final IndexFileDeleter deleter;
@@ -397,11 +393,12 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
   HashSet<SegmentCommitInfo> mergingSegments = new HashSet<>();
 
   private final MergeScheduler mergeScheduler;
+  private Set<SegmentMerger> runningAddIndexesMerges = new HashSet<>();
   private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<>();
   private Set<MergePolicy.OneMerge> runningMerges = new HashSet<>();
   private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<>();
   private long mergeGen;
-  private boolean stopMerges;
+  private boolean stopMerges; // TODO make sure this is only changed once and never set back to false
   private boolean didMessageState;
 
   final AtomicInteger flushCount = new AtomicInteger();
@@ -2302,6 +2299,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
     }
     
     try {
+      stopMerges = true; // this disables merges forever
       abortMerges();
       if (infoStream.isEnabled("IW")) {
         infoStream.message("IW", "rollback: done finish merges");
@@ -2464,8 +2462,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
             try {
               // Abort any running merges
               abortMerges();
-              // Let merges run again
-              stopMerges = false;
               adjustPendingNumDocs(-segmentInfos.totalMaxDoc());
               // Remove all segments
               segmentInfos.clear();
@@ -2508,9 +2504,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
    *  method: when you abort a long-running merge, you lose
    *  a lot of work that must later be redone. */
   private synchronized void abortMerges() {
-
-    stopMerges = true;
-
     // Abort all pending & running merges:
     for (final MergePolicy.OneMerge merge : pendingMerges) {
       if (infoStream.isEnabled("IW")) {
@@ -2531,10 +2524,11 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
     // We wait here to make all merges stop.  It should not
     // take very long because they periodically check if
     // they are aborted.
-    while (runningMerges.size() != 0) {
+    while (runningMerges.size() + runningAddIndexesMerges.size() != 0) {
 
       if (infoStream.isEnabled("IW")) {
-        infoStream.message("IW", "now wait for " + runningMerges.size() + " running merge/s to abort");
+        infoStream.message("IW", "now wait for " + runningMerges.size()
+            + " running merge/s to abort; currently running addIndexes: " + runningAddIndexesMerges.size());
       }
 
       doWait();
@@ -3010,7 +3004,19 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
         return docWriter.deleteQueue.getNextSequenceNumber();
       }
 
-      merger.merge();                // merge 'em
+      synchronized (this) {
+        ensureOpen();
+        assert stopMerges == false;
+        runningAddIndexesMerges.add(merger);
+      }
+      try {
+        merger.merge();  // merge 'em
+      } finally {
+        synchronized (this) {
+          runningAddIndexesMerges.remove(merger);
+          notifyAll();
+        }
+      }
       SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, numSoftDeleted, -1L, -1L, -1L);
 
       info.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
index a078020..4f55497 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -208,7 +208,7 @@ final class SegmentMerger {
     }
   }
   
-  public void mergeFieldInfos() throws IOException {
+  public void mergeFieldInfos() {
     for (FieldInfos readerFieldInfos : mergeState.fieldInfos) {
       for (FieldInfo fi : readerFieldInfos) {
         fieldInfosBuilder.add(fi);
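
The IndexWriter hunks in this commit follow a simple register/await pattern: each addIndexes SegmentMerger is added to runningAddIndexesMerges under the IndexWriter monitor, removed again in a finally block that also calls notifyAll(), and abortMerges() keeps waiting until both runningMerges and runningAddIndexesMerges are empty. A condensed sketch of that pattern follows; MergeTracker, runTracked and the Runnable stand-in for the merge work are hypothetical names for illustration, not IndexWriter API.

import java.util.HashSet;
import java.util.Set;

// Illustrative only: a generic register/await pattern mirroring the patch.
class MergeTracker {
  private final Set<Object> runningAddIndexesMerges = new HashSet<>();
  private boolean stopMerges; // set once, never reset (see the TODO in the patch)

  void runTracked(Object merger, Runnable merge) {
    synchronized (this) {
      // the real patch uses ensureOpen() plus an assert here
      if (stopMerges) {
        throw new IllegalStateException("merges are aborted");
      }
      runningAddIndexesMerges.add(merger);
    }
    try {
      merge.run(); // the actual merge work runs outside the lock
    } finally {
      synchronized (this) {
        runningAddIndexesMerges.remove(merger);
        notifyAll(); // wake any thread waiting in abortMerges()
      }
    }
  }

  synchronized void abortMerges() throws InterruptedException {
    stopMerges = true; // disables merges forever
    while (!runningAddIndexesMerges.isEmpty()) {
      wait(); // monitor is released here and re-acquired when notified
    }
  }
}

Waking the waiter from the finally block means abortMerges() cannot hang even if the merge itself throws, which is the scenario the rollback path cares about.
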