Posted to commits@lucene.apache.org by jp...@apache.org on 2020/09/14 16:15:17 UTC

[lucene-solr] branch branch_8x updated (72af208 -> cdfdc1e)

This is an automated email from the ASF dual-hosted git repository.

jpountz pushed a change to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from 72af208  SOLR-14725 update batchSize parameter docs for update() and delete() stream expressions (#1729)
     new 6cd3fa0  Make sure to test normal scorers with asserting wrappers. (#1834)
     new cdfdc1e  LUCENE-9510: Don't pull a merge instance when flushing stored fields out-of-order. (#1872)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.


Summary of changes:
 lucene/CHANGES.txt                                     |  4 +++-
 .../lucene/index/SortingStoredFieldsConsumer.java      |  5 +++--
 .../org/apache/lucene/facet/TestDrillSideways.java     | 18 ++++++++++++------
 .../lucene/facet/range/TestRangeFacetCounts.java       |  5 +++--
 .../java/org/apache/lucene/search/AssertingWeight.java | 13 +++++++++++--
 5 files changed, 32 insertions(+), 13 deletions(-)


[lucene-solr] 02/02: LUCENE-9510: Don't pull a merge instance when flushing stored fields out-of-order. (#1872)

Posted by jp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jpountz pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit cdfdc1e0851478713b6f0e997bff3947cdaf98e9
Author: Adrien Grand <jp...@gmail.com>
AuthorDate: Mon Sep 14 18:07:04 2020 +0200

    LUCENE-9510: Don't pull a merge instance when flushing stored fields out-of-order. (#1872)
    
    With recent changes to stored fields that split blocks into several sub
    blocks, the merge instance has become much slower at random access since
    it would decompress all sub blocks when accessing a document. Since
    stored fields likely get accessed in random order at flush time when
    index sorting is enabled, it's better not to use the merge instance.
    
    On a synthetic benchmark that has one stored field and one numeric
    doc-value field that is used for sorting and fed with random values,
    this made indexing more than 4x faster.
---
 .../java/org/apache/lucene/index/SortingStoredFieldsConsumer.java    | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
index a861f72..4647a70 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingStoredFieldsConsumer.java
@@ -60,7 +60,8 @@ final class SortingStoredFieldsConsumer extends StoredFieldsConsumer {
     }
     StoredFieldsReader reader = codec.storedFieldsFormat()
         .fieldsReader(tmpDirectory, state.segmentInfo, state.fieldInfos, IOContext.DEFAULT);
-    StoredFieldsReader mergeReader = reader.getMergeInstance();
+    // Don't pull a merge instance, since merge instances optimize for
+    // sequential access while we consume stored fields in random order here.
     StoredFieldsWriter sortWriter = codec.storedFieldsFormat()
         .fieldsWriter(state.directory, state.segmentInfo, IOContext.DEFAULT);
     try {
@@ -68,7 +69,7 @@ final class SortingStoredFieldsConsumer extends StoredFieldsConsumer {
       CopyVisitor visitor = new CopyVisitor(sortWriter);
       for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
         sortWriter.startDocument();
-        mergeReader.visitDocument(sortMap.newToOld(docID), visitor);
+        reader.visitDocument(sortMap.newToOld(docID), visitor);
         sortWriter.finishDocument();
       }
       sortWriter.finish(state.fieldInfos, state.segmentInfo.maxDoc());
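
A rough, self-contained sketch of the synthetic benchmark described in the
commit message: one stored field plus one numeric doc-values field that
drives index sorting, fed with random values. The class name, field names,
and document count are illustrative assumptions, not part of the commit;
the setIndexSort configuration is what routes the flush through the
SortingStoredFieldsConsumer code patched above.

import java.nio.file.Files;
import java.util.Random;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SortedFlushBenchSketch {
  public static void main(String[] args) throws Exception {
    IndexWriterConfig config = new IndexWriterConfig();
    // Index sorting makes the flush rewrite stored fields in sorted order,
    // i.e. in random order relative to how documents were buffered.
    config.setIndexSort(new Sort(new SortField("sort_key", SortField.Type.LONG)));
    Random random = new Random(42);
    try (Directory dir = FSDirectory.open(Files.createTempDirectory("sorted-flush-bench"));
         IndexWriter writer = new IndexWriter(dir, config)) {
      long start = System.nanoTime();
      for (int i = 0; i < 1_000_000; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField("sort_key", random.nextLong()));
        doc.add(new StoredField("stored", "value-" + random.nextInt()));
        writer.addDocument(doc);
      }
      writer.commit(); // flush: this is where the patched copy loop runs
      System.out.println("indexed in " + (System.nanoTime() - start) / 1_000_000 + " ms");
    }
  }
}

With this setup, each flush copies the buffered stored fields in sorted
order. Per the commit message, the merge instance decompresses all sub
blocks of a compressed block when accessing a document, which is why the
plain reader is the better fit for this random-access copy loop.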


[lucene-solr] 01/02: Make sure to test normal scorers with asserting wrappers. (#1834)

Posted by jp...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jpountz pushed a commit to branch branch_8x
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 6cd3fa0a2bf4d91e098d3300c3c4ddb7c6bc703c
Author: Julie Tibshirani <ju...@elastic.co>
AuthorDate: Mon Sep 14 09:06:41 2020 -0700

    Make sure to test normal scorers with asserting wrappers. (#1834)
---
 lucene/CHANGES.txt                                     |  4 +++-
 .../org/apache/lucene/facet/TestDrillSideways.java     | 18 ++++++++++++------
 .../lucene/facet/range/TestRangeFacetCounts.java       |  5 +++--
 .../java/org/apache/lucene/search/AssertingWeight.java | 13 +++++++++++--
 4 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 12fe913..bdf62dd 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -45,7 +45,9 @@ Improvements
 * LUCENE-9446: In BooleanQuery rewrite, always remove MatchAllDocsQuery filter clauses
   when possible. (Julie Tibshirani)
 
-* LUCENE-9501: Improve how Asserting* test classes handle singleton doc values.
+* LUCENE-9501: Improve coverage for Asserting* test classes: make sure to handle singleton doc
+  values, and sometimes exercise Weight#scorer instead of Weight#bulkScorer for top-level
+  queries. (Julie Tibshirani)
 
 * LUCENE-9511: Include StoredFieldsWriter in DWPT accounting to ensure that its
   heap consumption is taken into account when IndexWriter stalls or should flush
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
index dfc32da..18a307c 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java
@@ -115,6 +115,12 @@ public class TestDrillSideways extends FacetTestCase {
     };
   }
 
+  private IndexSearcher getNewSearcher(IndexReader reader) {
+    // Do not wrap with an asserting searcher, since DrillSidewaysQuery doesn't
+    // implement all the required components like Weight#scorer.
+    return newSearcher(reader, true, false);
+  }
+
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
     Directory taxoDir = newDirectory();
@@ -154,7 +160,7 @@ public class TestDrillSideways extends FacetTestCase {
     writer.addDocument(config.build(taxoWriter, doc));
 
     // NRT open
-    IndexSearcher searcher = newSearcher(writer.getReader());
+    IndexSearcher searcher = getNewSearcher(writer.getReader());
 
     //System.out.println("searcher=" + searcher);
 
@@ -339,7 +345,7 @@ public class TestDrillSideways extends FacetTestCase {
     writer.addDocument(config.build(taxoWriter, doc));
 
     // NRT open
-    IndexSearcher searcher = newSearcher(writer.getReader());
+    IndexSearcher searcher = getNewSearcher(writer.getReader());
 
     //System.out.println("searcher=" + searcher);
 
@@ -402,7 +408,7 @@ public class TestDrillSideways extends FacetTestCase {
     writer.addDocument(config.build(taxoWriter, doc));
 
     // NRT open
-    IndexSearcher searcher = newSearcher(writer.getReader());
+    IndexSearcher searcher = getNewSearcher(writer.getReader());
 
     //System.out.println("searcher=" + searcher);
 
@@ -629,7 +635,7 @@ public class TestDrillSideways extends FacetTestCase {
     IndexReader r = w.getReader();
 
     final SortedSetDocValuesReaderState sortedSetDVState;
-    IndexSearcher s = newSearcher(r);
+    IndexSearcher s = getNewSearcher(r);
 
     if (doUseDV) {
       sortedSetDVState = new DefaultSortedSetDocValuesReaderState(s.getIndexReader());
@@ -1139,7 +1145,7 @@ public class TestDrillSideways extends FacetTestCase {
     Directory taxoDir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
     DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
-    IndexSearcher searcher = newSearcher(writer.getReader());
+    IndexSearcher searcher = getNewSearcher(writer.getReader());
     TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
 
     // Count "Author"
@@ -1178,7 +1184,7 @@ public class TestDrillSideways extends FacetTestCase {
     writer.addDocument(config.build(taxoWriter, doc));
 
     // NRT open
-    IndexSearcher searcher = newSearcher(writer.getReader());
+    IndexSearcher searcher = getNewSearcher(writer.getReader());
 
     // NRT open
     TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
index 7a4367a..f1d847c 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
@@ -254,7 +254,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
 
     final TaxonomyReader tr = new DirectoryTaxonomyReader(tw);
 
-    IndexSearcher s = newSearcher(r, false);
+    IndexSearcher s = newSearcher(r, false, false);
 
     if (VERBOSE) {
       System.out.println("TEST: searcher=" + s);
@@ -813,7 +813,8 @@ public class TestRangeFacetCounts extends FacetTestCase {
     FacetsCollector fc = new FacetsCollector();
 
     IndexReader r = writer.getReader();
-    IndexSearcher s = newSearcher(r);
+
+    IndexSearcher s = newSearcher(r, false, false);
     s.search(new MatchAllDocsQuery(), fc);
 
     final DoubleRange[] ranges = new DoubleRange[] {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
index 55fda23..88b9499 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
@@ -20,6 +20,8 @@ import java.io.IOException;
 import java.util.Random;
 import org.apache.lucene.index.LeafReaderContext;
 
+import static org.apache.lucene.util.LuceneTestCase.usually;
+
 class AssertingWeight extends FilterWeight {
 
   final Random random;
@@ -85,11 +87,18 @@ class AssertingWeight extends FilterWeight {
 
   @Override
   public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
-    BulkScorer inScorer = in.bulkScorer(context);
+    BulkScorer inScorer;
+    // We explicitly test both the delegate's bulk scorer, and also the normal scorer.
+    // This ensures that normal scorers are sometimes tested with an asserting wrapper.
+    if (usually(random)) {
+      inScorer = in.bulkScorer(context);
+    } else {
+      inScorer = super.bulkScorer(context);
+    }
+
     if (inScorer == null) {
       return null;
     }
-
     return AssertingBulkScorer.wrap(new Random(random.nextLong()), inScorer, context.reader().maxDoc(), scoreMode);
   }
 }
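
Why the super.bulkScorer branch exercises normal scorers: FilterWeight
leaves bulkScorer to the Weight base class, whose default implementation
builds its bulk scorer on top of Weight#scorer; here that is
AssertingWeight#scorer, which returns an asserting wrapper. The sketch
below approximates the usually()/rarely() contract from LuceneTestCase
that gates the two branches; the fixed 10% odds are an assumed placeholder
(the real probability depends on TEST_NIGHTLY and RANDOM_MULTIPLIER).

import java.util.Random;

// Illustrative approximation of LuceneTestCase's usually()/rarely() pair,
// which the patched bulkScorer uses to pick a branch. The 10% odds are an
// assumption for illustration, not the real implementation.
final class UsuallySketch {

  // rarely: true on a small fraction of calls.
  static boolean rarely(Random random) {
    return random.nextInt(100) < 10;
  }

  // usually: the common case, so in.bulkScorer(context) stays the dominant
  // code path while Weight#scorer still gets regular coverage.
  static boolean usually(Random random) {
    return !rarely(random);
  }
}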