You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by jp...@apache.org on 2022/09/20 13:38:44 UTC
[lucene] branch main updated: Guard FieldExistsQuery against null pointers (#11794)
This is an automated email from the ASF dual-hosted git repository.
jpountz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/lucene.git
The following commit(s) were added to refs/heads/main by this push:
new 4eaebee6861 Guard FieldExistsQuery against null pointers (#11794)
4eaebee6861 is described below
commit 4eaebee6861d508e642cbb24d9802a11a95adcab
Author: Luca Cavanna <ja...@users.noreply.github.com>
AuthorDate: Tue Sep 20 15:38:38 2022 +0200
Guard FieldExistsQuery against null pointers (#11794)
FieldExistsQuery checks if there are points for a certain field, and then retrieves the
corresponding point values. When all documents that had points for a certain field have
been deleted from a certain segment, as well as merged away, field info may report
that there are points yet the corresponding point values are null.
With this change we add a null check in FieldExistsQuery. Long term, we will likely want
to prevent this situation from happening.
Relates #11393
---
lucene/CHANGES.txt | 2 +
.../org/apache/lucene/search/FieldExistsQuery.java | 6 ++-
.../apache/lucene/search/TestFieldExistsQuery.java | 58 ++++++++++++++++++++++
3 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 242bac523d7..6280fd22d2b 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -169,6 +169,8 @@ Bug Fixes
* LUCENE-10674: Ensure BitSetConjDISI returns NO_MORE_DOCS when sub-iterator exhausts. (Jack Mazanec)
+* GITHUB#11794: Guard FieldExistsQuery against null pointers (Luca Cavanna)
+
Build
---------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldExistsQuery.java b/lucene/core/src/java/org/apache/lucene/search/FieldExistsQuery.java
index 5a3d8b755b3..1e5dd09d183 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldExistsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldExistsQuery.java
@@ -230,9 +230,11 @@ public class FieldExistsQuery extends Query {
!= DocValuesType.NONE) { // the field indexes doc values
if (reader.hasDeletions() == false) {
if (fieldInfo.getPointDimensionCount() > 0) {
- return reader.getPointValues(field).getDocCount();
+ PointValues pointValues = reader.getPointValues(field);
+ return pointValues == null ? 0 : pointValues.getDocCount();
} else if (fieldInfo.getIndexOptions() != IndexOptions.NONE) {
- return reader.terms(field).getDocCount();
+ Terms terms = reader.terms(field);
+ return terms == null ? 0 : terms.getDocCount();
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldExistsQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldExistsQuery.java
index 0cb43864948..1f2ab372a1e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldExistsQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldExistsQuery.java
@@ -702,6 +702,64 @@ public class TestFieldExistsQuery extends LuceneTestCase {
return v;
}
+ public void testDeleteAllPointDocs() throws Exception {
+ try (Directory dir = newDirectory();
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir)) {
+
+ Document doc = new Document();
+ doc.add(new StringField("id", "0", Field.Store.NO));
+ doc.add(new LongPoint("long", 17));
+ doc.add(new NumericDocValuesField("long", 17));
+ iw.addDocument(doc);
+ // add another document before the flush, otherwise the segment only has the document that
+ // we are going to delete and the merge simply ignores the segment without carrying over its
+ // field infos
+ iw.addDocument(new Document());
+ // make sure there are two segments or force merge will be a no-op
+ iw.flush();
+ iw.addDocument(new Document());
+ iw.commit();
+
+ iw.deleteDocuments(new Term("id", "0"));
+ iw.forceMerge(1);
+
+ try (IndexReader reader = iw.getReader()) {
+ assertTrue(reader.leaves().size() == 1 && reader.hasDeletions() == false);
+ IndexSearcher searcher = newSearcher(reader);
+ assertEquals(0, searcher.count(new FieldExistsQuery("long")));
+ }
+ }
+ }
+
+ public void testDeleteAllTermDocs() throws Exception {
+ try (Directory dir = newDirectory();
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir)) {
+
+ Document doc = new Document();
+ doc.add(new StringField("id", "0", Field.Store.NO));
+ doc.add(new StringField("str", "foo", Store.NO));
+ doc.add(new SortedDocValuesField("str", new BytesRef("foo")));
+ iw.addDocument(doc);
+ // add another document before the flush, otherwise the segment only has the document that
+ // we are going to delete and the merge simply ignores the segment without carrying over its
+ // field infos
+ iw.addDocument(new Document());
+ // make sure there are two segments or force merge will be a no-op
+ iw.flush();
+ iw.addDocument(new Document());
+ iw.commit();
+
+ iw.deleteDocuments(new Term("id", "0"));
+ iw.forceMerge(1);
+
+ try (IndexReader reader = iw.getReader()) {
+ assertTrue(reader.leaves().size() == 1 && reader.hasDeletions() == false);
+ IndexSearcher searcher = newSearcher(reader);
+ assertEquals(0, searcher.count(new FieldExistsQuery("str")));
+ }
+ }
+ }
+
private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores)
throws IOException {
final int maxDoc = searcher.getIndexReader().maxDoc();