You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by dw...@apache.org on 2020/12/23 11:44:32 UTC
[lucene-solr] 03/11: :lucene:core -
src/**/org/apache/lucene/search/**
This is an automated email from the ASF dual-hosted git repository.
dweiss pushed a commit to branch jira/LUCENE-9570
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git
commit 7c771bece49eee9a6da00de47cf8e65a3c5c7f43
Author: Dawid Weiss <da...@carrotsearch.com>
AuthorDate: Wed Dec 23 11:02:18 2020 +0100
:lucene:core - src/**/org/apache/lucene/search/**
---
gradle/validation/spotless.gradle | 2 +-
.../org/apache/lucene/search/AutomatonQuery.java | 101 +--
.../org/apache/lucene/search/BlendedTermQuery.java | 139 +--
.../lucene/search/BlockMaxConjunctionScorer.java | 20 +-
.../org/apache/lucene/search/BlockMaxDISI.java | 10 +-
.../lucene/search/Boolean2ScorerSupplier.java | 104 ++-
.../org/apache/lucene/search/BooleanClause.java | 75 +-
.../org/apache/lucene/search/BooleanQuery.java | 199 ++---
.../org/apache/lucene/search/BooleanScorer.java | 74 +-
.../org/apache/lucene/search/BooleanWeight.java | 102 ++-
.../org/apache/lucene/search/BoostAttribute.java | 22 +-
.../apache/lucene/search/BoostAttributeImpl.java | 9 +-
.../java/org/apache/lucene/search/BoostQuery.java | 39 +-
.../java/org/apache/lucene/search/BulkScorer.java | 71 +-
.../org/apache/lucene/search/CachingCollector.java | 128 ++-
.../apache/lucene/search/CollectionStatistics.java | 124 +--
.../search/CollectionTerminatedException.java | 16 +-
.../java/org/apache/lucene/search/Collector.java | 61 +-
.../org/apache/lucene/search/CollectorManager.java | 35 +-
.../org/apache/lucene/search/ConjunctionDISI.java | 156 ++--
.../apache/lucene/search/ConjunctionScorer.java | 9 +-
.../apache/lucene/search/ConstantScoreQuery.java | 49 +-
.../apache/lucene/search/ConstantScoreScorer.java | 65 +-
.../apache/lucene/search/ConstantScoreWeight.java | 12 +-
.../search/ControlledRealTimeReopenThread.java | 113 ++-
.../apache/lucene/search/DisiPriorityQueue.java | 15 +-
.../java/org/apache/lucene/search/DisiWrapper.java | 7 +-
.../search/DisjunctionDISIApproximation.java | 9 +-
.../lucene/search/DisjunctionMatchesIterator.java | 68 +-
.../apache/lucene/search/DisjunctionMaxQuery.java | 123 +--
.../apache/lucene/search/DisjunctionMaxScorer.java | 26 +-
.../DisjunctionScoreBlockBoundaryPropagator.java | 40 +-
.../apache/lucene/search/DisjunctionScorer.java | 44 +-
.../apache/lucene/search/DisjunctionSumScorer.java | 14 +-
.../java/org/apache/lucene/search/DocIdSet.java | 73 +-
.../org/apache/lucene/search/DocIdSetIterator.java | 112 ++-
.../lucene/search/DocValuesFieldExistsQuery.java | 20 +-
.../lucene/search/DocValuesRewriteMethod.java | 160 ++--
.../org/apache/lucene/search/DoubleValues.java | 38 +-
.../apache/lucene/search/DoubleValuesSource.java | 237 +++--
.../apache/lucene/search/ExactPhraseMatcher.java | 68 +-
.../java/org/apache/lucene/search/Explanation.java | 63 +-
.../org/apache/lucene/search/FieldComparator.java | 250 +++---
.../lucene/search/FieldComparatorSource.java | 11 +-
.../java/org/apache/lucene/search/FieldDoc.java | 35 +-
.../apache/lucene/search/FieldValueHitQueue.java | 81 +-
.../org/apache/lucene/search/FilterCollector.java | 2 -
.../apache/lucene/search/FilterLeafCollector.java | 2 -
.../lucene/search/FilterMatchesIterator.java | 9 +-
.../org/apache/lucene/search/FilterScorable.java | 11 +-
.../org/apache/lucene/search/FilterScorer.java | 22 +-
.../org/apache/lucene/search/FilterWeight.java | 25 +-
.../lucene/search/FilteredDocIdSetIterator.java | 12 +-
.../lucene/search/FuzzyAutomatonBuilder.java | 14 +-
.../java/org/apache/lucene/search/FuzzyQuery.java | 170 ++--
.../org/apache/lucene/search/FuzzyTermsEnum.java | 171 ++--
.../java/org/apache/lucene/search/HitQueue.java | 65 +-
.../apache/lucene/search/HitsThresholdChecker.java | 23 +-
.../java/org/apache/lucene/search/ImpactsDISI.java | 25 +-
.../lucene/search/IndexOrDocValuesQuery.java | 54 +-
.../org/apache/lucene/search/IndexSearcher.java | 688 ++++++++-------
.../org/apache/lucene/search/LRUQueryCache.java | 279 +++---
.../org/apache/lucene/search/LeafCollector.java | 74 +-
.../apache/lucene/search/LeafFieldComparator.java | 119 ++-
.../org/apache/lucene/search/LeafSimScorer.java | 31 +-
.../org/apache/lucene/search/LiveFieldValues.java | 50 +-
.../java/org/apache/lucene/search/LongValues.java | 10 +-
.../org/apache/lucene/search/LongValuesSource.java | 65 +-
.../apache/lucene/search/MatchAllDocsQuery.java | 14 +-
.../org/apache/lucene/search/MatchNoDocsQuery.java | 12 +-
.../src/java/org/apache/lucene/search/Matches.java | 19 +-
.../org/apache/lucene/search/MatchesIterator.java | 36 +-
.../org/apache/lucene/search/MatchesUtils.java | 73 +-
.../search/MaxNonCompetitiveBoostAttribute.java | 34 +-
.../MaxNonCompetitiveBoostAttributeImpl.java | 17 +-
.../apache/lucene/search/MaxScoreAccumulator.java | 14 +-
.../org/apache/lucene/search/MaxScoreCache.java | 30 +-
.../lucene/search/MaxScoreSumPropagator.java | 22 +-
.../lucene/search/MinShouldMatchSumScorer.java | 182 ++--
.../org/apache/lucene/search/MultiCollector.java | 69 +-
.../lucene/search/MultiCollectorManager.java | 30 +-
.../lucene/search/MultiLeafFieldComparator.java | 7 +-
.../org/apache/lucene/search/MultiPhraseQuery.java | 193 ++--
.../org/apache/lucene/search/MultiTermQuery.java | 317 ++++---
.../search/MultiTermQueryConstantScoreWrapper.java | 66 +-
.../java/org/apache/lucene/search/Multiset.java | 10 +-
.../org/apache/lucene/search/NGramPhraseQuery.java | 32 +-
.../org/apache/lucene/search/NamedMatches.java | 32 +-
.../lucene/search/NormsFieldExistsQuery.java | 16 +-
.../org/apache/lucene/search/PhraseMatcher.java | 47 +-
.../org/apache/lucene/search/PhrasePositions.java | 41 +-
.../java/org/apache/lucene/search/PhraseQuery.java | 244 ++---
.../java/org/apache/lucene/search/PhraseQueue.java | 5 +-
.../org/apache/lucene/search/PhraseScorer.java | 1 -
.../org/apache/lucene/search/PhraseWeight.java | 142 +--
.../org/apache/lucene/search/PointInSetQuery.java | 167 ++--
.../org/apache/lucene/search/PointRangeQuery.java | 184 ++--
.../lucene/search/PositiveScoresOnlyCollector.java | 12 +-
.../java/org/apache/lucene/search/PrefixQuery.java | 23 +-
.../src/java/org/apache/lucene/search/Query.java | 109 +--
.../java/org/apache/lucene/search/QueryCache.java | 9 +-
.../apache/lucene/search/QueryCachingPolicy.java | 23 +-
.../org/apache/lucene/search/QueryRescorer.java | 101 ++-
.../org/apache/lucene/search/QueryVisitor.java | 35 +-
.../lucene/search/QueueSizeBasedExecutor.java | 10 +-
.../org/apache/lucene/search/ReferenceManager.java | 215 +++--
.../java/org/apache/lucene/search/RegexpQuery.java | 117 ++-
.../apache/lucene/search/ReqExclBulkScorer.java | 3 -
.../org/apache/lucene/search/ReqExclScorer.java | 33 +-
.../org/apache/lucene/search/ReqOptSumScorer.java | 269 +++---
.../java/org/apache/lucene/search/Rescorer.java | 42 +-
.../java/org/apache/lucene/search/Scorable.java | 47 +-
.../java/org/apache/lucene/search/ScoreAndDoc.java | 7 +-
.../lucene/search/ScoreCachingWrappingScorer.java | 17 +-
.../java/org/apache/lucene/search/ScoreDoc.java | 13 +-
.../java/org/apache/lucene/search/ScoreMode.java | 36 +-
.../src/java/org/apache/lucene/search/Scorer.java | 72 +-
.../org/apache/lucene/search/ScorerSupplier.java | 25 +-
.../org/apache/lucene/search/ScoringRewrite.java | 181 ++--
.../org/apache/lucene/search/SearcherFactory.java | 35 +-
.../lucene/search/SearcherLifetimeManager.java | 168 ++--
.../org/apache/lucene/search/SearcherManager.java | 153 ++--
.../org/apache/lucene/search/SegmentCacheable.java | 19 +-
.../org/apache/lucene/search/SimpleCollector.java | 3 -
.../lucene/search/SimpleFieldComparator.java | 5 +-
.../org/apache/lucene/search/SliceExecutor.java | 11 +-
.../apache/lucene/search/SloppyPhraseMatcher.java | 332 +++----
.../src/java/org/apache/lucene/search/Sort.java | 146 ++-
.../java/org/apache/lucene/search/SortField.java | 365 ++++----
.../org/apache/lucene/search/SortRescorer.java | 29 +-
.../lucene/search/SortedNumericSelector.java | 65 +-
.../lucene/search/SortedNumericSortField.java | 134 +--
.../apache/lucene/search/SortedSetSelector.java | 98 ++-
.../apache/lucene/search/SortedSetSortField.java | 81 +-
.../org/apache/lucene/search/SynonymQuery.java | 161 ++--
.../lucene/search/TermCollectingRewrite.java | 37 +-
.../org/apache/lucene/search/TermInSetQuery.java | 68 +-
.../apache/lucene/search/TermMatchesIterator.java | 9 +-
.../java/org/apache/lucene/search/TermQuery.java | 94 +-
.../org/apache/lucene/search/TermRangeQuery.java | 130 +--
.../java/org/apache/lucene/search/TermScorer.java | 19 +-
.../org/apache/lucene/search/TermStatistics.java | 92 +-
.../lucene/search/TimeLimitingCollector.java | 176 ++--
.../src/java/org/apache/lucene/search/TopDocs.java | 136 +--
.../org/apache/lucene/search/TopDocsCollector.java | 123 ++-
.../apache/lucene/search/TopFieldCollector.java | 214 ++---
.../org/apache/lucene/search/TopFieldDocs.java | 24 +-
.../apache/lucene/search/TopScoreDocCollector.java | 111 ++-
.../org/apache/lucene/search/TopTermsRewrite.java | 244 ++---
.../lucene/search/TotalHitCountCollector.java | 6 +-
.../java/org/apache/lucene/search/TotalHits.java | 32 +-
.../org/apache/lucene/search/TwoPhaseIterator.java | 51 +-
.../search/UsageTrackingQueryCachingPolicy.java | 49 +-
.../java/org/apache/lucene/search/WANDScorer.java | 147 ++--
.../src/java/org/apache/lucene/search/Weight.java | 177 ++--
.../org/apache/lucene/search/WildcardQuery.java | 59 +-
.../lucene/search/comparators/DocComparator.java | 25 +-
.../search/comparators/DoubleComparator.java | 15 +-
.../lucene/search/comparators/FloatComparator.java | 15 +-
.../lucene/search/comparators/IntComparator.java | 15 +-
.../lucene/search/comparators/LongComparator.java | 15 +-
.../lucene/search/comparators/MinDocIterator.java | 7 +-
.../search/comparators/NumericComparator.java | 171 ++--
.../lucene/search/comparators/package-info.java | 5 +-
.../org/apache/lucene/search/package-info.java | 840 +++++++++---------
.../lucene/search/similarities/AfterEffect.java | 31 +-
.../lucene/search/similarities/AfterEffectB.java | 17 +-
.../lucene/search/similarities/AfterEffectL.java | 8 +-
.../lucene/search/similarities/Axiomatic.java | 160 ++--
.../lucene/search/similarities/AxiomaticF1EXP.java | 82 +-
.../lucene/search/similarities/AxiomaticF1LOG.java | 76 +-
.../lucene/search/similarities/AxiomaticF2EXP.java | 85 +-
.../lucene/search/similarities/AxiomaticF2LOG.java | 80 +-
.../lucene/search/similarities/AxiomaticF3EXP.java | 80 +-
.../lucene/search/similarities/AxiomaticF3LOG.java | 73 +-
.../lucene/search/similarities/BM25Similarity.java | 140 +--
.../lucene/search/similarities/BasicModel.java | 37 +-
.../lucene/search/similarities/BasicModelG.java | 34 +-
.../lucene/search/similarities/BasicModelIF.java | 23 +-
.../lucene/search/similarities/BasicModelIn.java | 18 +-
.../lucene/search/similarities/BasicModelIne.java | 27 +-
.../lucene/search/similarities/BasicStats.java | 32 +-
.../search/similarities/BooleanSimilarity.java | 18 +-
.../search/similarities/ClassicSimilarity.java | 23 +-
.../lucene/search/similarities/DFISimilarity.java | 85 +-
.../lucene/search/similarities/DFRSimilarity.java | 146 ++-
.../lucene/search/similarities/Distribution.java | 30 +-
.../lucene/search/similarities/DistributionLL.java | 10 +-
.../search/similarities/DistributionSPL.java | 19 +-
.../lucene/search/similarities/IBSimilarity.java | 149 ++--
.../lucene/search/similarities/Independence.java | 21 +-
.../similarities/IndependenceChiSquared.java | 15 +-
.../search/similarities/IndependenceSaturated.java | 13 +-
.../similarities/IndependenceStandardized.java | 16 +-
.../search/similarities/LMDirichletSimilarity.java | 93 +-
.../similarities/LMJelinekMercerSimilarity.java | 70 +-
.../lucene/search/similarities/LMSimilarity.java | 101 +--
.../apache/lucene/search/similarities/Lambda.java | 21 +-
.../lucene/search/similarities/LambdaDF.java | 17 +-
.../lucene/search/similarities/LambdaTTF.java | 18 +-
.../search/similarities/MultiSimilarity.java | 25 +-
.../lucene/search/similarities/Normalization.java | 52 +-
.../search/similarities/NormalizationH1.java | 47 +-
.../search/similarities/NormalizationH2.java | 48 +-
.../search/similarities/NormalizationH3.java | 33 +-
.../lucene/search/similarities/NormalizationZ.java | 28 +-
.../similarities/PerFieldSimilarityWrapper.java | 26 +-
.../lucene/search/similarities/Similarity.java | 205 +++--
.../lucene/search/similarities/SimilarityBase.java | 151 ++--
.../search/similarities/TFIDFSimilarity.java | 485 +++++-----
.../lucene/search/similarities/package-info.java | 191 ++--
.../lucene/search/spans/ConjunctionSpans.java | 24 +-
.../apache/lucene/search/spans/ContainSpans.java | 18 +-
.../lucene/search/spans/FieldMaskingSpanQuery.java | 68 +-
.../apache/lucene/search/spans/FilterSpans.java | 57 +-
.../lucene/search/spans/NearSpansOrdered.java | 46 +-
.../lucene/search/spans/NearSpansUnordered.java | 40 +-
.../apache/lucene/search/spans/SpanBoostQuery.java | 33 +-
.../apache/lucene/search/spans/SpanCollector.java | 16 +-
.../lucene/search/spans/SpanContainQuery.java | 35 +-
.../lucene/search/spans/SpanContainingQuery.java | 35 +-
.../apache/lucene/search/spans/SpanFirstQuery.java | 27 +-
.../search/spans/SpanMultiTermQueryWrapper.java | 200 +++--
.../apache/lucene/search/spans/SpanNearQuery.java | 131 ++-
.../apache/lucene/search/spans/SpanNotQuery.java | 107 ++-
.../apache/lucene/search/spans/SpanOrQuery.java | 59 +-
.../search/spans/SpanPositionCheckQuery.java | 63 +-
.../lucene/search/spans/SpanPositionQueue.java | 8 +-
.../search/spans/SpanPositionRangeQuery.java | 33 +-
.../org/apache/lucene/search/spans/SpanQuery.java | 11 +-
.../org/apache/lucene/search/spans/SpanScorer.java | 39 +-
.../apache/lucene/search/spans/SpanTermQuery.java | 98 ++-
.../org/apache/lucene/search/spans/SpanWeight.java | 242 ++---
.../lucene/search/spans/SpanWithinQuery.java | 36 +-
.../java/org/apache/lucene/search/spans/Spans.java | 62 +-
.../org/apache/lucene/search/spans/TermSpans.java | 28 +-
.../apache/lucene/search/spans/package-info.java | 97 +-
.../apache/lucene/search/BaseTestRangeFilter.java | 70 +-
.../lucene/search/FuzzyTermOnShortTermsTest.java | 119 +--
.../apache/lucene/search/JustCompileSearch.java | 33 +-
.../apache/lucene/search/TermInSetQueryTest.java | 87 +-
.../search/TestApproximationSearchEquivalence.java | 12 +-
.../apache/lucene/search/TestAutomatonQuery.java | 131 ++-
.../lucene/search/TestAutomatonQueryUnicode.java | 19 +-
.../apache/lucene/search/TestBlendedTermQuery.java | 68 +-
.../lucene/search/TestBlockMaxConjunction.java | 29 +-
.../org/apache/lucene/search/TestBoolean2.java | 125 +--
.../lucene/search/TestBoolean2ScorerSupplier.java | 144 ++-
.../lucene/search/TestBooleanMinShouldMatch.java | 678 +++++++-------
.../org/apache/lucene/search/TestBooleanOr.java | 107 +--
.../org/apache/lucene/search/TestBooleanQuery.java | 327 +++----
.../search/TestBooleanQueryVisitSubscorers.java | 100 ++-
.../apache/lucene/search/TestBooleanRewrites.java | 495 ++++++-----
.../apache/lucene/search/TestBooleanScorer.java | 172 ++--
.../org/apache/lucene/search/TestBoostQuery.java | 34 +-
.../apache/lucene/search/TestCachingCollector.java | 83 +-
.../lucene/search/TestComplexExplanations.java | 238 ++---
.../TestComplexExplanationsOfNonMatches.java | 11 +-
.../apache/lucene/search/TestConjunctionDISI.java | 139 ++-
.../org/apache/lucene/search/TestConjunctions.java | 74 +-
.../lucene/search/TestConstantScoreQuery.java | 141 +--
.../lucene/search/TestConstantScoreScorer.java | 69 +-
.../search/TestControlledRealTimeReopenThread.java | 263 +++---
.../lucene/search/TestCustomSearcherSort.java | 105 +--
.../org/apache/lucene/search/TestDateSort.java | 11 +-
.../lucene/search/TestDisjunctionMaxQuery.java | 436 +++++----
...estDisjunctionScoreBlockBoundaryPropagator.java | 5 +-
.../apache/lucene/search/TestDocIdSetIterator.java | 22 +-
.../apache/lucene/search/TestDocValuesQueries.java | 117 ++-
.../lucene/search/TestDocValuesRewriteMethod.java | 48 +-
.../lucene/search/TestDoubleRangeFieldQueries.java | 86 +-
.../lucene/search/TestDoubleValuesSource.java | 195 ++--
.../apache/lucene/search/TestEarlyTermination.java | 56 +-
.../lucene/search/TestElevationComparator.java | 221 +++--
.../lucene/search/TestFieldCacheRewriteMethod.java | 18 +-
.../search/TestFieldSortOptimizationSkipping.java | 137 +--
.../apache/lucene/search/TestFieldValueQuery.java | 21 +-
.../org/apache/lucene/search/TestFilterWeight.java | 30 +-
.../lucene/search/TestFloatRangeFieldQueries.java | 89 +-
.../org/apache/lucene/search/TestFuzzyQuery.java | 340 ++++---
.../lucene/search/TestIndexOrDocValuesQuery.java | 89 +-
.../apache/lucene/search/TestIndexSearcher.java | 228 ++---
.../lucene/search/TestInetAddressRangeQueries.java | 43 +-
.../lucene/search/TestIntRangeFieldQueries.java | 59 +-
.../apache/lucene/search/TestLRUQueryCache.java | 781 +++++++++-------
.../lucene/search/TestLatLonDocValuesQueries.java | 6 +-
.../lucene/search/TestLatLonPointQueries.java | 7 +-
.../apache/lucene/search/TestLiveFieldValues.java | 167 ++--
.../lucene/search/TestLongRangeFieldQueries.java | 63 +-
.../apache/lucene/search/TestLongValuesSource.java | 28 +-
.../lucene/search/TestMatchAllDocsQuery.java | 36 +-
.../apache/lucene/search/TestMatchNoDocsQuery.java | 16 +-
.../apache/lucene/search/TestMatchesIterator.java | 709 ++++++++-------
.../apache/lucene/search/TestMaxClauseLimit.java | 72 +-
.../lucene/search/TestMaxScoreAccumulator.java | 3 +-
.../lucene/search/TestMaxScoreSumPropagator.java | 31 +-
.../apache/lucene/search/TestMinShouldMatch2.java | 177 ++--
.../apache/lucene/search/TestMultiCollector.java | 218 ++---
.../apache/lucene/search/TestMultiPhraseEnum.java | 56 +-
.../apache/lucene/search/TestMultiPhraseQuery.java | 288 +++---
.../apache/lucene/search/TestMultiSliceMerge.java | 25 +-
.../lucene/search/TestMultiTermConstantScore.java | 168 ++--
.../lucene/search/TestMultiTermQueryRewrites.java | 166 ++--
.../lucene/search/TestMultiThreadTermVectors.java | 25 +-
.../org/apache/lucene/search/TestMultiset.java | 3 -
.../apache/lucene/search/TestNGramPhraseQuery.java | 27 +-
.../org/apache/lucene/search/TestNeedsScores.java | 36 +-
.../lucene/search/TestNormsFieldExistsQuery.java | 15 +-
.../src/test/org/apache/lucene/search/TestNot.java | 12 +-
.../lucene/search/TestPhrasePrefixQuery.java | 26 +-
.../org/apache/lucene/search/TestPhraseQuery.java | 557 ++++++------
.../org/apache/lucene/search/TestPointQueries.java | 980 +++++++++++++--------
.../lucene/search/TestPositionIncrement.java | 133 ++-
.../search/TestPositiveScoresOnlyCollector.java | 53 +-
.../lucene/search/TestPrefixInBooleanQuery.java | 54 +-
.../org/apache/lucene/search/TestPrefixQuery.java | 21 +-
.../org/apache/lucene/search/TestPrefixRandom.java | 48 +-
.../apache/lucene/search/TestQueryRescorer.java | 162 ++--
.../org/apache/lucene/search/TestQueryVisitor.java | 228 +++--
.../search/TestRangeFieldsDocValuesQuery.java | 8 +-
.../org/apache/lucene/search/TestRegexpQuery.java | 118 +--
.../org/apache/lucene/search/TestRegexpRandom.java | 46 +-
.../apache/lucene/search/TestRegexpRandom2.java | 51 +-
.../lucene/search/TestReqExclBulkScorer.java | 88 +-
.../apache/lucene/search/TestReqOptSumScorer.java | 162 ++--
.../lucene/search/TestSameScoresWithThreads.java | 72 +-
.../search/TestScoreCachingWrappingScorer.java | 77 +-
.../org/apache/lucene/search/TestScorerPerf.java | 240 +++--
.../org/apache/lucene/search/TestSearchAfter.java | 97 +-
.../lucene/search/TestSearchWithThreads.java | 50 +-
.../apache/lucene/search/TestSearcherManager.java | 554 ++++++------
.../lucene/search/TestSegmentCacheables.java | 7 +-
.../apache/lucene/search/TestShardSearching.java | 101 ++-
.../org/apache/lucene/search/TestSimilarity.java | 208 +++--
.../lucene/search/TestSimilarityProvider.java | 8 +-
.../lucene/search/TestSimpleExplanations.java | 355 ++++----
.../search/TestSimpleExplanationsOfNonMatches.java | 12 +-
.../TestSimpleExplanationsWithFillerDocs.java | 60 +-
.../lucene/search/TestSimpleSearchEquivalence.java | 55 +-
.../lucene/search/TestSloppyPhraseQuery.java | 242 ++---
.../lucene/search/TestSloppyPhraseQuery2.java | 26 +-
.../test/org/apache/lucene/search/TestSort.java | 124 +--
.../org/apache/lucene/search/TestSortRandom.java | 114 ++-
.../org/apache/lucene/search/TestSortRescorer.java | 91 +-
.../lucene/search/TestSortedNumericSortField.java | 53 +-
.../lucene/search/TestSortedSetSelector.java | 93 +-
.../lucene/search/TestSortedSetSortField.java | 37 +-
.../apache/lucene/search/TestSubScorerFreqs.java | 44 +-
.../org/apache/lucene/search/TestSynonymQuery.java | 250 +++---
.../org/apache/lucene/search/TestTermQuery.java | 107 ++-
.../apache/lucene/search/TestTermRangeQuery.java | 84 +-
.../org/apache/lucene/search/TestTermScorer.java | 193 ++--
.../lucene/search/TestTimeLimitingCollector.java | 267 +++---
.../apache/lucene/search/TestTopDocsCollector.java | 235 +++--
.../org/apache/lucene/search/TestTopDocsMerge.java | 117 ++-
.../lucene/search/TestTopFieldCollector.java | 246 +++---
.../TestTopFieldCollectorEarlyTermination.java | 157 ++--
.../lucene/search/TestTotalHitCountCollector.java | 9 +-
.../TestUsageTrackingFilterCachingPolicy.java | 37 +-
.../org/apache/lucene/search/TestWANDScorer.java | 174 ++--
.../org/apache/lucene/search/TestWildcard.java | 279 +++---
.../apache/lucene/search/TestWildcardRandom.java | 49 +-
.../lucene/search/TestXYPointDistanceSort.java | 67 +-
.../apache/lucene/search/TestXYPointQueries.java | 2 +-
.../search/similarities/AxiomaticTestCase.java | 6 +-
.../search/similarities/BasicModelTestCase.java | 11 +-
.../search/similarities/DistributionTestCase.java | 5 +-
.../search/similarities/TestAxiomaticF1EXP.java | 1 -
.../search/similarities/TestAxiomaticF1LOG.java | 1 -
.../search/similarities/TestAxiomaticF2EXP.java | 1 -
.../search/similarities/TestAxiomaticF2LOG.java | 1 -
.../search/similarities/TestAxiomaticF3EXP.java | 1 -
.../search/similarities/TestAxiomaticF3LOG.java | 1 -
.../similarities/TestAxiomaticSimilarity.java | 81 +-
.../search/similarities/TestBM25Similarity.java | 80 +-
.../search/similarities/TestBasicModelG.java | 1 -
.../search/similarities/TestBasicModelIF.java | 1 -
.../search/similarities/TestBasicModelIn.java | 1 -
.../search/similarities/TestBasicModelIne.java | 1 -
.../search/similarities/TestBooleanSimilarity.java | 26 +-
.../search/similarities/TestClassicSimilarity.java | 77 +-
.../search/similarities/TestDistributionLL.java | 1 -
.../search/similarities/TestDistributionSPL.java | 1 -
.../similarities/TestIndependenceChiSquared.java | 1 -
.../similarities/TestIndependenceSaturated.java | 1 -
.../similarities/TestIndependenceStandardized.java | 1 -
.../similarities/TestLMDirichletSimilarity.java | 1 -
.../TestLMJelinekMercerSimilarity.java | 1 -
.../search/similarities/TestSimilarity2.java | 62 +-
.../search/similarities/TestSimilarityBase.java | 397 ++++-----
.../search/spans/JustCompileSearchSpans.java | 18 +-
.../org/apache/lucene/search/spans/TestBasics.java | 377 ++++----
.../search/spans/TestFieldMaskingSpanQuery.java | 348 ++++----
.../lucene/search/spans/TestFilterSpans.java | 2 -
.../lucene/search/spans/TestNearSpansOrdered.java | 275 +++---
.../lucene/search/spans/TestSpanBoostQuery.java | 14 +-
.../lucene/search/spans/TestSpanCollection.java | 51 +-
.../lucene/search/spans/TestSpanContainQuery.java | 34 +-
.../lucene/search/spans/TestSpanExplanations.java | 154 ++--
.../spans/TestSpanExplanationsOfNonMatches.java | 13 +-
.../lucene/search/spans/TestSpanFirstQuery.java | 15 +-
.../spans/TestSpanMultiTermQueryWrapper.java | 133 ++-
.../lucene/search/spans/TestSpanNearQuery.java | 54 +-
.../lucene/search/spans/TestSpanNotQuery.java | 32 +-
.../lucene/search/spans/TestSpanOrQuery.java | 18 +-
.../search/spans/TestSpanSearchEquivalence.java | 313 +++----
.../lucene/search/spans/TestSpanTermQuery.java | 19 +-
.../org/apache/lucene/search/spans/TestSpans.java | 248 +++---
.../apache/lucene/search/spans/TestSpansEnum.java | 56 +-
409 files changed, 19986 insertions(+), 18225 deletions(-)
diff --git a/gradle/validation/spotless.gradle b/gradle/validation/spotless.gradle
index 6e35970..bc8e10f 100644
--- a/gradle/validation/spotless.gradle
+++ b/gradle/validation/spotless.gradle
@@ -50,7 +50,7 @@ allprojects { prj ->
"src/**/org/apache/lucene/document/**",
"src/**/org/apache/lucene/geo/**",
"src/**/org/apache/lucene/index/**",
- // "src/**/org/apache/lucene/search/**",
+ "src/**/org/apache/lucene/search/**",
"src/**/org/apache/lucene/store/**",
"src/**/org/apache/lucene/util/**"
diff --git a/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java b/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
index 08b9eef..69b7ed2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
@@ -16,9 +16,7 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -31,41 +29,40 @@ import org.apache.lucene.util.automaton.Operations;
/**
* A {@link Query} that will match terms against a finite-state machine.
- * <p>
- * This query will match documents that contain terms accepted by a given
- * finite-state machine. The automaton can be constructed with the
- * {@link org.apache.lucene.util.automaton} API. Alternatively, it can be
- * created from a regular expression with {@link RegexpQuery} or from
- * the standard Lucene wildcard syntax with {@link WildcardQuery}.
- * </p>
- * <p>
- * When the query is executed, it will create an equivalent DFA of the
- * finite-state machine, and will enumerate the term dictionary in an
- * intelligent way to reduce the number of comparisons. For example: the regular
- * expression of <code>[dl]og?</code> will make approximately four comparisons:
+ *
+ * <p>This query will match documents that contain terms accepted by a given finite-state machine.
+ * The automaton can be constructed with the {@link org.apache.lucene.util.automaton} API.
+ * Alternatively, it can be created from a regular expression with {@link RegexpQuery} or from the
+ * standard Lucene wildcard syntax with {@link WildcardQuery}.
+ *
+ * <p>When the query is executed, it will create an equivalent DFA of the finite-state machine, and
+ * will enumerate the term dictionary in an intelligent way to reduce the number of comparisons. For
+ * example: the regular expression of <code>[dl]og?</code> will make approximately four comparisons:
* do, dog, lo, and log.
- * </p>
+ *
* @lucene.experimental
*/
public class AutomatonQuery extends MultiTermQuery implements Accountable {
- private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(AutomatonQuery.class);
+ private static final long BASE_RAM_BYTES =
+ RamUsageEstimator.shallowSizeOfInstance(AutomatonQuery.class);
/** the automaton to match index terms against */
protected final Automaton automaton;
+
protected final CompiledAutomaton compiled;
/** term containing the field, and possibly some pattern structure */
protected final Term term;
+
protected final boolean automatonIsBinary;
private final long ramBytesUsed; // cache
/**
* Create a new AutomatonQuery from an {@link Automaton}.
- *
- * @param term Term containing field and possibly some pattern structure. The
- * term text is ignored.
- * @param automaton Automaton to run, terms that are accepted are considered a
- * match.
+ *
+ * @param term Term containing field and possibly some pattern structure. The term text is
+ * ignored.
+ * @param automaton Automaton to run, terms that are accepted are considered a match.
*/
public AutomatonQuery(final Term term, Automaton automaton) {
this(term, automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
@@ -73,15 +70,13 @@ public class AutomatonQuery extends MultiTermQuery implements Accountable {
/**
* Create a new AutomatonQuery from an {@link Automaton}.
- *
- * @param term Term containing field and possibly some pattern structure. The
- * term text is ignored.
- * @param automaton Automaton to run, terms that are accepted are considered a
- * match.
- * @param maxDeterminizedStates maximum number of states in the resulting
- * automata. If the automata would need more than this many states
- * TooComplextToDeterminizeException is thrown. Higher number require more
- * space but can process more complex automata.
+ *
+ * @param term Term containing field and possibly some pattern structure. The term text is
+ * ignored.
+ * @param automaton Automaton to run, terms that are accepted are considered a match.
+ * @param maxDeterminizedStates maximum number of states in the resulting automata. If the
+ * automata would need more than this many states TooComplextToDeterminizeException is thrown.
+ * Higher number require more space but can process more complex automata.
*/
public AutomatonQuery(final Term term, Automaton automaton, int maxDeterminizedStates) {
this(term, automaton, maxDeterminizedStates, false);
@@ -89,19 +84,18 @@ public class AutomatonQuery extends MultiTermQuery implements Accountable {
/**
* Create a new AutomatonQuery from an {@link Automaton}.
- *
- * @param term Term containing field and possibly some pattern structure. The
- * term text is ignored.
- * @param automaton Automaton to run, terms that are accepted are considered a
- * match.
- * @param maxDeterminizedStates maximum number of states in the resulting
- * automata. If the automata would need more than this many states
- * TooComplextToDeterminizeException is thrown. Higher number require more
- * space but can process more complex automata.
- * @param isBinary if true, this automaton is already binary and
- * will not go through the UTF32ToUTF8 conversion
+ *
+ * @param term Term containing field and possibly some pattern structure. The term text is
+ * ignored.
+ * @param automaton Automaton to run, terms that are accepted are considered a match.
+ * @param maxDeterminizedStates maximum number of states in the resulting automata. If the
+ * automata would need more than this many states TooComplextToDeterminizeException is thrown.
+ * Higher number require more space but can process more complex automata.
+ * @param isBinary if true, this automaton is already binary and will not go through the
+ * UTF32ToUTF8 conversion
*/
- public AutomatonQuery(final Term term, Automaton automaton, int maxDeterminizedStates, boolean isBinary) {
+ public AutomatonQuery(
+ final Term term, Automaton automaton, int maxDeterminizedStates, boolean isBinary) {
super(term.field());
this.term = term;
this.automaton = automaton;
@@ -109,7 +103,8 @@ public class AutomatonQuery extends MultiTermQuery implements Accountable {
// TODO: we could take isFinite too, to save a bit of CPU in CompiledAutomaton ctor?:
this.compiled = new CompiledAutomaton(automaton, null, true, maxDeterminizedStates, isBinary);
- this.ramBytesUsed = BASE_RAM_BYTES + term.ramBytesUsed() + automaton.ramBytesUsed() + compiled.ramBytesUsed();
+ this.ramBytesUsed =
+ BASE_RAM_BYTES + term.ramBytesUsed() + automaton.ramBytesUsed() + compiled.ramBytesUsed();
}
@Override
@@ -128,20 +123,14 @@ public class AutomatonQuery extends MultiTermQuery implements Accountable {
@Override
public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (getClass() != obj.getClass())
- return false;
+ if (this == obj) return true;
+ if (!super.equals(obj)) return false;
+ if (getClass() != obj.getClass()) return false;
AutomatonQuery other = (AutomatonQuery) obj;
- if (!compiled.equals(other.compiled))
- return false;
+ if (!compiled.equals(other.compiled)) return false;
if (term == null) {
- if (other.term != null)
- return false;
- } else if (!term.equals(other.term))
- return false;
+ if (other.term != null) return false;
+ } else if (!term.equals(other.term)) return false;
return true;
}
@@ -172,7 +161,7 @@ public class AutomatonQuery extends MultiTermQuery implements Accountable {
return automaton;
}
- /** Is this a binary (byte) oriented automaton. See the constructor. */
+ /** Is this a binary (byte) oriented automaton. See the constructor. */
public boolean isAutomatonBinary() {
return automatonIsBinary;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
index c307d78..e9a135f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BlendedTermQuery.java
@@ -16,11 +16,9 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
@@ -32,17 +30,18 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
/**
- * A {@link Query} that blends index statistics across multiple terms.
- * This is particularly useful when several terms should produce identical
- * scores, regardless of their index statistics.
- * <p>For instance imagine that you are resolving synonyms at search time,
- * all terms should produce identical scores instead of the default behavior,
- * which tends to give higher scores to rare terms.
- * <p>An other useful use-case is cross-field search: imagine that you would
- * like to search for {@code john} on two fields: {@code first_name} and
- * {@code last_name}. You might not want to give a higher weight to matches
- * on the field where {@code john} is rarer, in which case
- * {@link BlendedTermQuery} would help as well.
+ * A {@link Query} that blends index statistics across multiple terms. This is particularly useful
+ * when several terms should produce identical scores, regardless of their index statistics.
+ *
+ * <p>For instance imagine that you are resolving synonyms at search time, all terms should produce
+ * identical scores instead of the default behavior, which tends to give higher scores to rare
+ * terms.
+ *
+ * <p>Another useful use-case is cross-field search: imagine that you would like to search for
+ * {@code john} on two fields: {@code first_name} and {@code last_name}. You might not want to give
+ * a higher weight to matches on the field where {@code john} is rarer, in which case {@link
+ * BlendedTermQuery} would help as well.
+ *
* @lucene.experimental
*/
public final class BlendedTermQuery extends Query {
@@ -59,31 +58,37 @@ public final class BlendedTermQuery extends Query {
/** Sole constructor. */
public Builder() {}
- /** Set the {@link RewriteMethod}. Default is to use
- * {@link BlendedTermQuery#DISJUNCTION_MAX_REWRITE}.
- * @see RewriteMethod */
+ /**
+ * Set the {@link RewriteMethod}. Default is to use {@link
+ * BlendedTermQuery#DISJUNCTION_MAX_REWRITE}.
+ *
+ * @see RewriteMethod
+ */
public Builder setRewriteMethod(RewriteMethod rewiteMethod) {
this.rewriteMethod = rewiteMethod;
return this;
}
- /** Add a new {@link Term} to this builder, with a default boost of {@code 1}.
- * @see #add(Term, float) */
+ /**
+ * Add a new {@link Term} to this builder, with a default boost of {@code 1}.
+ *
+ * @see #add(Term, float)
+ */
public Builder add(Term term) {
return add(term, 1f);
}
- /** Add a {@link Term} with the provided boost. The higher the boost, the
- * more this term will contribute to the overall score of the
- * {@link BlendedTermQuery}. */
+ /**
+ * Add a {@link Term} with the provided boost. The higher the boost, the more this term will
+ * contribute to the overall score of the {@link BlendedTermQuery}.
+ */
public Builder add(Term term, float boost) {
return add(term, boost, null);
}
/**
- * Expert: Add a {@link Term} with the provided boost and context.
- * This method is useful if you already have a {@link TermStates}
- * object constructed for the given term.
+ * Expert: Add a {@link Term} with the provided boost and context. This method is useful if you
+ * already have a {@link TermStates} object constructed for the given term.
*/
public Builder add(Term term, float boost, TermStates context) {
if (numTerms >= IndexSearcher.getMaxClauseCount()) {
@@ -107,53 +112,56 @@ public final class BlendedTermQuery extends Query {
ArrayUtil.copyOfSubArray(contexts, 0, numTerms),
rewriteMethod);
}
-
}
- /** A {@link RewriteMethod} defines how queries for individual terms should
- * be merged.
- * @lucene.experimental
- * @see BlendedTermQuery#BOOLEAN_REWRITE
- * @see BlendedTermQuery.DisjunctionMaxRewrite */
- public static abstract class RewriteMethod {
+ /**
+ * A {@link RewriteMethod} defines how queries for individual terms should be merged.
+ *
+ * @lucene.experimental
+ * @see BlendedTermQuery#BOOLEAN_REWRITE
+ * @see BlendedTermQuery.DisjunctionMaxRewrite
+ */
+ public abstract static class RewriteMethod {
/** Sole constructor */
protected RewriteMethod() {}
/** Merge the provided sub queries into a single {@link Query} object. */
public abstract Query rewrite(Query[] subQueries);
-
}
/**
- * A {@link RewriteMethod} that adds all sub queries to a {@link BooleanQuery}.
- * This {@link RewriteMethod} is useful when matching on several fields is
- * considered better than having a good match on a single field.
+ * A {@link RewriteMethod} that adds all sub queries to a {@link BooleanQuery}. This {@link
+ * RewriteMethod} is useful when matching on several fields is considered better than having a
+ * good match on a single field.
*/
- public static final RewriteMethod BOOLEAN_REWRITE = new RewriteMethod() {
- @Override
- public Query rewrite(Query[] subQueries) {
- BooleanQuery.Builder merged = new BooleanQuery.Builder();
- for (Query query : subQueries) {
- merged.add(query, Occur.SHOULD);
- }
- return merged.build();
- }
- };
+ public static final RewriteMethod BOOLEAN_REWRITE =
+ new RewriteMethod() {
+ @Override
+ public Query rewrite(Query[] subQueries) {
+ BooleanQuery.Builder merged = new BooleanQuery.Builder();
+ for (Query query : subQueries) {
+ merged.add(query, Occur.SHOULD);
+ }
+ return merged.build();
+ }
+ };
/**
- * A {@link RewriteMethod} that creates a {@link DisjunctionMaxQuery} out
- * of the sub queries. This {@link RewriteMethod} is useful when having a
- * good match on a single field is considered better than having average
- * matches on several fields.
+ * A {@link RewriteMethod} that creates a {@link DisjunctionMaxQuery} out of the sub queries. This
+ * {@link RewriteMethod} is useful when having a good match on a single field is considered better
+ * than having average matches on several fields.
*/
public static class DisjunctionMaxRewrite extends RewriteMethod {
private final float tieBreakerMultiplier;
- /** This {@link RewriteMethod} will create {@link DisjunctionMaxQuery}
- * instances that have the provided tie breaker.
- * @see DisjunctionMaxQuery */
+ /**
+ * This {@link RewriteMethod} will create {@link DisjunctionMaxQuery} instances that have the
+ * provided tie breaker.
+ *
+ * @see DisjunctionMaxQuery
+ */
public DisjunctionMaxRewrite(float tieBreakerMultiplier) {
this.tieBreakerMultiplier = tieBreakerMultiplier;
}
@@ -176,7 +184,6 @@ public final class BlendedTermQuery extends Query {
public int hashCode() {
return 31 * getClass().hashCode() + Float.floatToIntBits(tieBreakerMultiplier);
}
-
}
/** {@link DisjunctionMaxRewrite} instance with a tie-breaker of {@code 0.01}. */
@@ -187,8 +194,8 @@ public final class BlendedTermQuery extends Query {
private final TermStates[] contexts;
private final RewriteMethod rewriteMethod;
- private BlendedTermQuery(Term[] terms, float[] boosts, TermStates[] contexts,
- RewriteMethod rewriteMethod) {
+ private BlendedTermQuery(
+ Term[] terms, float[] boosts, TermStates[] contexts, RewriteMethod rewriteMethod) {
assert terms.length == boosts.length;
assert terms.length == contexts.length;
this.terms = terms;
@@ -223,15 +230,14 @@ public final class BlendedTermQuery extends Query {
@Override
public boolean equals(Object other) {
- return sameClassAs(other) &&
- equalsTo(getClass().cast(other));
+ return sameClassAs(other) && equalsTo(getClass().cast(other));
}
-
+
private boolean equalsTo(BlendedTermQuery other) {
- return Arrays.equals(terms, other.terms) &&
- Arrays.equals(contexts, other.contexts) &&
- Arrays.equals(boosts, other.boosts) &&
- rewriteMethod.equals(other.rewriteMethod);
+ return Arrays.equals(terms, other.terms)
+ && Arrays.equals(contexts, other.contexts)
+ && Arrays.equals(boosts, other.boosts)
+ && rewriteMethod.equals(other.rewriteMethod);
}
@Override
@@ -296,15 +302,17 @@ public final class BlendedTermQuery extends Query {
@Override
public void visit(QueryVisitor visitor) {
- Term[] termsToVisit = Arrays.stream(terms).filter(t -> visitor.acceptField(t.field())).toArray(Term[]::new);
+ Term[] termsToVisit =
+ Arrays.stream(terms).filter(t -> visitor.acceptField(t.field())).toArray(Term[]::new);
if (termsToVisit.length > 0) {
QueryVisitor v = visitor.getSubVisitor(Occur.SHOULD, this);
v.consumeTerms(this, termsToVisit);
}
}
- private static TermStates adjustFrequencies(IndexReaderContext readerContext,
- TermStates ctx, int artificialDf, long artificialTtf) throws IOException {
+ private static TermStates adjustFrequencies(
+ IndexReaderContext readerContext, TermStates ctx, int artificialDf, long artificialTtf)
+ throws IOException {
List<LeafReaderContext> leaves = readerContext.leaves();
final int len;
if (leaves == null) {
@@ -323,5 +331,4 @@ public final class BlendedTermQuery extends Query {
newCtx.accumulateStatistics(artificialDf, artificialTtf);
return newCtx;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BlockMaxConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/BlockMaxConjunctionScorer.java
index 898a07d..7810726 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BlockMaxConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BlockMaxConjunctionScorer.java
@@ -24,8 +24,8 @@ import java.util.Comparator;
import java.util.List;
/**
- * Scorer for conjunctions that checks the maximum scores of each clause in
- * order to potentially skip over blocks that can't have competitive matches.
+ * Scorer for conjunctions that checks the maximum scores of each clause in order to potentially
+ * skip over blocks that can't have competitive matches.
*/
final class BlockMaxConjunctionScorer extends Scorer {
final Scorer[] scorers;
@@ -64,9 +64,8 @@ final class BlockMaxConjunctionScorer extends Scorer {
if (twoPhases.length == 0) {
return null;
}
- float matchCost = (float) Arrays.stream(twoPhases)
- .mapToDouble(TwoPhaseIterator::matchCost)
- .sum();
+ float matchCost =
+ (float) Arrays.stream(twoPhases).mapToDouble(TwoPhaseIterator::matchCost).sum();
final DocIdSetIterator approx = approximation();
return new TwoPhaseIterator(approx) {
@Override
@@ -89,8 +88,9 @@ final class BlockMaxConjunctionScorer extends Scorer {
@Override
public DocIdSetIterator iterator() {
- return twoPhases.length == 0 ? approximation() :
- TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
+ return twoPhases.length == 0
+ ? approximation()
+ : TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
private DocIdSetIterator approximation() {
@@ -149,7 +149,8 @@ final class BlockMaxConjunctionScorer extends Scorer {
}
private int doNext(int doc) throws IOException {
- advanceHead: for(;;) {
+ advanceHead:
+ for (; ; ) {
assert doc == lead.docID();
if (doc == NO_MORE_DOCS) {
@@ -179,7 +180,8 @@ final class BlockMaxConjunctionScorer extends Scorer {
final int next = other.advance(doc);
if (next > doc) {
- // iterator beyond the current doc - advance lead and continue to the new highest doc.
+ // iterator beyond the current doc - advance lead and continue to the new highest
+ // doc.
doc = lead.advance(advanceTarget(next));
continue advanceHead;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BlockMaxDISI.java b/lucene/core/src/java/org/apache/lucene/search/BlockMaxDISI.java
index 804f96f..2f59d46 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BlockMaxDISI.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BlockMaxDISI.java
@@ -20,10 +20,10 @@ package org.apache.lucene.search;
import java.io.IOException;
/**
- * {@link DocIdSetIterator} that skips non-competitive docs by checking
- * the max score of the provided {@link Scorer} for the current block.
- * Call {@link #setMinCompetitiveScore(float)} in order to give this iterator the ability
- * to skip low-scoring documents.
+ * {@link DocIdSetIterator} that skips non-competitive docs by checking the max score of the
+ * provided {@link Scorer} for the current block. Call {@link #setMinCompetitiveScore(float)} in
+ * order to give this iterator the ability to skip low-scoring documents.
+ *
* @lucene.internal
*/
public class BlockMaxDISI extends DocIdSetIterator {
@@ -45,7 +45,7 @@ public class BlockMaxDISI extends DocIdSetIterator {
@Override
public int nextDoc() throws IOException {
- return advance(docID()+1);
+ return advance(docID() + 1);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
index 78eac03..3fa5886 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Boolean2ScorerSupplier.java
@@ -25,7 +25,6 @@ import java.util.List;
import java.util.Map;
import java.util.OptionalLong;
import java.util.stream.Stream;
-
import org.apache.lucene.search.BooleanClause.Occur;
final class Boolean2ScorerSupplier extends ScorerSupplier {
@@ -36,20 +35,28 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
private final int minShouldMatch;
private long cost = -1;
- Boolean2ScorerSupplier(Weight weight,
+ Boolean2ScorerSupplier(
+ Weight weight,
Map<Occur, Collection<ScorerSupplier>> subs,
- ScoreMode scoreMode, int minShouldMatch) {
+ ScoreMode scoreMode,
+ int minShouldMatch) {
if (minShouldMatch < 0) {
- throw new IllegalArgumentException("minShouldMatch must be positive, but got: " + minShouldMatch);
+ throw new IllegalArgumentException(
+ "minShouldMatch must be positive, but got: " + minShouldMatch);
}
if (minShouldMatch != 0 && minShouldMatch >= subs.get(Occur.SHOULD).size()) {
- throw new IllegalArgumentException("minShouldMatch must be strictly less than the number of SHOULD clauses");
+ throw new IllegalArgumentException(
+ "minShouldMatch must be strictly less than the number of SHOULD clauses");
}
- if (scoreMode.needsScores() == false && minShouldMatch == 0 && subs.get(Occur.SHOULD).size() > 0
+ if (scoreMode.needsScores() == false
+ && minShouldMatch == 0
+ && subs.get(Occur.SHOULD).size() > 0
&& subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size() > 0) {
- throw new IllegalArgumentException("Cannot pass purely optional clauses if scores are not needed");
+ throw new IllegalArgumentException(
+ "Cannot pass purely optional clauses if scores are not needed");
}
- if (subs.get(Occur.SHOULD).size() + subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size() == 0) {
+ if (subs.get(Occur.SHOULD).size() + subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size()
+ == 0) {
throw new IllegalArgumentException("There should be at least one positive clause");
}
this.weight = weight;
@@ -59,18 +66,19 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
private long computeCost() {
- OptionalLong minRequiredCost = Stream.concat(
- subs.get(Occur.MUST).stream(),
- subs.get(Occur.FILTER).stream())
- .mapToLong(ScorerSupplier::cost)
- .min();
+ OptionalLong minRequiredCost =
+ Stream.concat(subs.get(Occur.MUST).stream(), subs.get(Occur.FILTER).stream())
+ .mapToLong(ScorerSupplier::cost)
+ .min();
if (minRequiredCost.isPresent() && minShouldMatch == 0) {
return minRequiredCost.getAsLong();
} else {
final Collection<ScorerSupplier> optionalScorers = subs.get(Occur.SHOULD);
- final long shouldCost = MinShouldMatchSumScorer.cost(
- optionalScorers.stream().mapToLong(ScorerSupplier::cost),
- optionalScorers.size(), minShouldMatch);
+ final long shouldCost =
+ MinShouldMatchSumScorer.cost(
+ optionalScorers.stream().mapToLong(ScorerSupplier::cost),
+ optionalScorers.size(),
+ minShouldMatch);
return Math.min(minRequiredCost.orElse(Long.MAX_VALUE), shouldCost);
}
}
@@ -86,13 +94,14 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
@Override
public Scorer get(long leadCost) throws IOException {
Scorer scorer = getInternal(leadCost);
- if (scoreMode == ScoreMode.TOP_SCORES &&
- subs.get(Occur.SHOULD).isEmpty() && subs.get(Occur.MUST).isEmpty()) {
+ if (scoreMode == ScoreMode.TOP_SCORES
+ && subs.get(Occur.SHOULD).isEmpty()
+ && subs.get(Occur.MUST).isEmpty()) {
// no scoring clauses but scores are needed so we wrap the scorer in
// a constant score in order to allow early termination
- return scorer.twoPhaseIterator() != null ?
- new ConstantScoreScorer(weight, 0f, scoreMode, scorer.twoPhaseIterator()) :
- new ConstantScoreScorer(weight, 0f, scoreMode, scorer.iterator());
+ return scorer.twoPhaseIterator() != null
+ ? new ConstantScoreScorer(weight, 0f, scoreMode, scorer.twoPhaseIterator())
+ : new ConstantScoreScorer(weight, 0f, scoreMode, scorer.iterator());
}
return scorer;
}
@@ -103,12 +112,18 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
// pure conjunction
if (subs.get(Occur.SHOULD).isEmpty()) {
- return excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost), subs.get(Occur.MUST_NOT), leadCost);
+ return excl(
+ req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost),
+ subs.get(Occur.MUST_NOT),
+ leadCost);
}
// pure disjunction
if (subs.get(Occur.FILTER).isEmpty() && subs.get(Occur.MUST).isEmpty()) {
- return excl(opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost), subs.get(Occur.MUST_NOT), leadCost);
+ return excl(
+ opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost),
+ subs.get(Occur.MUST_NOT),
+ leadCost);
}
// conjunction-disjunction mix:
@@ -117,23 +132,40 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
// optional side must match. otherwise it's required + optional
if (minShouldMatch > 0) {
- Scorer req = excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost), subs.get(Occur.MUST_NOT), leadCost);
+ Scorer req =
+ excl(
+ req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost),
+ subs.get(Occur.MUST_NOT),
+ leadCost);
Scorer opt = opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost);
return new ConjunctionScorer(weight, Arrays.asList(req, opt), Arrays.asList(req, opt));
} else {
assert scoreMode.needsScores();
return new ReqOptSumScorer(
- excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost), subs.get(Occur.MUST_NOT), leadCost),
- opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost), scoreMode);
+ excl(
+ req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost),
+ subs.get(Occur.MUST_NOT),
+ leadCost),
+ opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost),
+ scoreMode);
}
}
- /** Create a new scorer for the given required clauses. Note that
- * {@code requiredScoring} is a subset of {@code required} containing
- * required clauses that should participate in scoring. */
- private Scorer req(Collection<ScorerSupplier> requiredNoScoring, Collection<ScorerSupplier> requiredScoring, long leadCost) throws IOException {
+ /**
+ * Create a new scorer for the given required clauses. Note that {@code requiredScoring} is a
+ * subset of {@code required} containing required clauses that should participate in scoring.
+ */
+ private Scorer req(
+ Collection<ScorerSupplier> requiredNoScoring,
+ Collection<ScorerSupplier> requiredScoring,
+ long leadCost)
+ throws IOException {
if (requiredNoScoring.size() + requiredScoring.size() == 1) {
- Scorer req = (requiredNoScoring.isEmpty() ? requiredScoring : requiredNoScoring).iterator().next().get(leadCost);
+ Scorer req =
+ (requiredNoScoring.isEmpty() ? requiredScoring : requiredNoScoring)
+ .iterator()
+ .next()
+ .get(leadCost);
if (scoreMode.needsScores() == false) {
return req;
@@ -148,6 +180,7 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
public float score() throws IOException {
return 0f;
}
+
@Override
public float getMaxScore(int upTo) throws IOException {
return 0f;
@@ -178,7 +211,8 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
}
- private Scorer excl(Scorer main, Collection<ScorerSupplier> prohibited, long leadCost) throws IOException {
+ private Scorer excl(Scorer main, Collection<ScorerSupplier> prohibited, long leadCost)
+ throws IOException {
if (prohibited.isEmpty()) {
return main;
} else {
@@ -186,8 +220,9 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
}
- private Scorer opt(Collection<ScorerSupplier> optional, int minShouldMatch,
- ScoreMode scoreMode, long leadCost) throws IOException {
+ private Scorer opt(
+ Collection<ScorerSupplier> optional, int minShouldMatch, ScoreMode scoreMode, long leadCost)
+ throws IOException {
if (optional.size() == 1) {
return optional.iterator().next().get(leadCost);
} else {
@@ -204,5 +239,4 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanClause.java b/lucene/core/src/java/org/apache/lucene/search/BooleanClause.java
index f3931b1..8455e0c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanClause.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanClause.java
@@ -16,50 +16,66 @@
*/
package org.apache.lucene.search;
-
import java.util.Objects;
/** A clause in a BooleanQuery. */
public final class BooleanClause {
-
+
/** Specifies how clauses are to occur in matching documents. */
public static enum Occur {
/** Use this operator for clauses that <i>must</i> appear in the matching documents. */
- MUST { @Override public String toString() { return "+"; } },
+ MUST {
+ @Override
+ public String toString() {
+ return "+";
+ }
+ },
/** Like {@link #MUST} except that these clauses do not participate in scoring. */
- FILTER { @Override public String toString() { return "#"; } },
-
- /** Use this operator for clauses that <i>should</i> appear in the
- * matching documents. For a BooleanQuery with no <code>MUST</code>
- * clauses one or more <code>SHOULD</code> clauses must match a document
- * for the BooleanQuery to match.
+ FILTER {
+ @Override
+ public String toString() {
+ return "#";
+ }
+ },
+
+ /**
+ * Use this operator for clauses that <i>should</i> appear in the matching documents. For a
+ * BooleanQuery with no <code>MUST</code> clauses one or more <code>SHOULD</code> clauses must
+ * match a document for the BooleanQuery to match.
+ *
* @see BooleanQuery.Builder#setMinimumNumberShouldMatch
*/
- SHOULD { @Override public String toString() { return ""; } },
-
- /** Use this operator for clauses that <i>must not</i> appear in the matching documents.
- * Note that it is not possible to search for queries that only consist
- * of a <code>MUST_NOT</code> clause. These clauses do not contribute to the
- * score of documents. */
- MUST_NOT { @Override public String toString() { return "-"; } };
-
+ SHOULD {
+ @Override
+ public String toString() {
+ return "";
+ }
+ },
+
+ /**
+ * Use this operator for clauses that <i>must not</i> appear in the matching documents. Note
+ * that it is not possible to search for queries that only consist of a <code>MUST_NOT</code>
+ * clause. These clauses do not contribute to the score of documents.
+ */
+ MUST_NOT {
+ @Override
+ public String toString() {
+ return "-";
+ }
+ };
}
- /** The query whose matching documents are combined by the boolean query.
- */
+ /** The query whose matching documents are combined by the boolean query. */
private final Query query;
private final Occur occur;
-
- /** Constructs a BooleanClause.
- */
+ /** Constructs a BooleanClause. */
public BooleanClause(Query query, Occur occur) {
this.query = Objects.requireNonNull(query, "Query must not be null");
this.occur = Objects.requireNonNull(occur, "Occur must not be null");
-
}
public Occur getOccur() {
@@ -69,7 +85,7 @@ public final class BooleanClause {
public Query getQuery() {
return query;
}
-
+
public boolean isProhibited() {
return Occur.MUST_NOT == occur;
}
@@ -85,20 +101,17 @@ public final class BooleanClause {
/** Returns true if <code>o</code> is equal to this. */
@Override
public boolean equals(Object o) {
- if (o == null || !(o instanceof BooleanClause))
- return false;
- BooleanClause other = (BooleanClause)o;
- return this.query.equals(other.query)
- && this.occur == other.occur;
+ if (o == null || !(o instanceof BooleanClause)) return false;
+ BooleanClause other = (BooleanClause) o;
+ return this.query.equals(other.query) && this.occur == other.occur;
}
- /** Returns a hash code value for this object.*/
+ /** Returns a hash code value for this object. */
@Override
public int hashCode() {
return 31 * query.hashCode() + occur.hashCode();
}
-
@Override
public String toString() {
return occur.toString() + query.toString();
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index d23bed1..e74d63d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -31,37 +30,40 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause.Occur;
-/** A Query that matches documents matching boolean combinations of other
- * queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
- * BooleanQuerys.
- */
+/**
+ * A Query that matches documents matching boolean combinations of other queries, e.g. {@link
+ * TermQuery}s, {@link PhraseQuery}s or other BooleanQuerys.
+ */
public class BooleanQuery extends Query implements Iterable<BooleanClause> {
- /** Thrown when an attempt is made to add more than {@link
- * #getMaxClauseCount()} clauses. This typically happens if
- * a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery
- * is expanded to many terms during search.
+ /**
+ * Thrown when an attempt is made to add more than {@link #getMaxClauseCount()} clauses. This
+ * typically happens if a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery is expanded to
+ * many terms during search.
+ *
* @deprecated use {@link IndexSearcher.TooManyClauses}
*/
@Deprecated // Remove in Lucene 10
- public static class TooManyClauses extends IndexSearcher.TooManyClauses { }
+ public static class TooManyClauses extends IndexSearcher.TooManyClauses {}
- /** Return the maximum number of clauses permitted, 1024 by default.
- * Attempts to add more than the permitted number of clauses cause {@link
- * TooManyClauses} to be thrown.
+ /**
+ * Return the maximum number of clauses permitted, 1024 by default. Attempts to add more than the
+ * permitted number of clauses cause {@link TooManyClauses} to be thrown.
+ *
* @see IndexSearcher#setMaxClauseCount(int)
* @deprecated use {@link IndexSearcher#getMaxClauseCount()}
*/
@Deprecated // Remove in Lucene 10
- public static int getMaxClauseCount() { return IndexSearcher.getMaxClauseCount(); }
+ public static int getMaxClauseCount() {
+ return IndexSearcher.getMaxClauseCount();
+ }
/**
- * Set the maximum number of clauses permitted per BooleanQuery.
- * Default value is 1024.
+ * Set the maximum number of clauses permitted per BooleanQuery. Default value is 1024.
+ *
* @deprecated use {@link IndexSearcher#setMaxClauseCount(int)}
*/
@Deprecated // Remove in Lucene 10
@@ -79,19 +81,14 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
public Builder() {}
/**
- * Specifies a minimum number of the optional BooleanClauses
- * which must be satisfied.
+ * Specifies a minimum number of the optional BooleanClauses which must be satisfied.
+ *
+ * <p>By default no optional clauses are necessary for a match (unless there are no required
+ * clauses). If this method is used, then the specified number of clauses is required.
*
- * <p>
- * By default no optional clauses are necessary for a match
- * (unless there are no required clauses). If this method is used,
- * then the specified number of clauses is required.
- * </p>
- * <p>
- * Use of this method is totally independent of specifying that
- * any specific clauses are required (or prohibited). This number will
- * only be compared against the number of matching optional clauses.
- * </p>
+ * <p>Use of this method is totally independent of specifying that any specific clauses are
+ * required (or prohibited). This number will only be compared against the number of matching
+ * optional clauses.
*
* @param min the number of optional clauses that must match
*/
@@ -101,14 +98,15 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
/**
- * Add a new clause to this {@link Builder}. Note that the order in which
- * clauses are added does not have any impact on matching documents or query
- * performance.
- * @throws IndexSearcher.TooManyClauses if the new number of clauses exceeds the maximum clause number
+ * Add a new clause to this {@link Builder}. Note that the order in which clauses are added does
+ * not have any impact on matching documents or query performance.
+ *
+ * @throws IndexSearcher.TooManyClauses if the new number of clauses exceeds the maximum clause
+ * number
*/
public Builder add(BooleanClause clause) {
// We do the final deep check for max clauses count limit during
- //<code>IndexSearcher.rewrite</code> but do this check to short
+ // <code>IndexSearcher.rewrite</code> but do this check to short
// circuit in case a single query holds more than numClauses
if (clauses.size() >= IndexSearcher.maxClauseCount) {
throw new IndexSearcher.TooManyClauses();
@@ -118,29 +116,29 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
/**
- * Add a new clause to this {@link Builder}. Note that the order in which
- * clauses are added does not have any impact on matching documents or query
- * performance.
- * @throws IndexSearcher.TooManyClauses if the new number of clauses exceeds the maximum clause number
+ * Add a new clause to this {@link Builder}. Note that the order in which clauses are added does
+ * not have any impact on matching documents or query performance.
+ *
+ * @throws IndexSearcher.TooManyClauses if the new number of clauses exceeds the maximum clause
+ * number
*/
public Builder add(Query query, Occur occur) {
return add(new BooleanClause(query, occur));
}
- /** Create a new {@link BooleanQuery} based on the parameters that have
- * been set on this builder. */
+ /**
+ * Create a new {@link BooleanQuery} based on the parameters that have been set on this builder.
+ */
public BooleanQuery build() {
return new BooleanQuery(minimumNumberShouldMatch, clauses.toArray(new BooleanClause[0]));
}
-
}
private final int minimumNumberShouldMatch;
- private final List<BooleanClause> clauses; // used for toString() and getClauses()
+ private final List<BooleanClause> clauses; // used for toString() and getClauses()
private final Map<Occur, Collection<Query>> clauseSets; // used for equals/hashcode
- private BooleanQuery(int minimumNumberShouldMatch,
- BooleanClause[] clauses) {
+ private BooleanQuery(int minimumNumberShouldMatch, BooleanClause[] clauses) {
this.minimumNumberShouldMatch = minimumNumberShouldMatch;
this.clauses = Collections.unmodifiableList(Arrays.asList(clauses));
clauseSets = new EnumMap<>(Occur.class);
@@ -155,10 +153,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
}
- /**
- * Gets the minimum number of the optional BooleanClauses
- * which must be satisfied.
- */
+ /** Gets the minimum number of the optional BooleanClauses which must be satisfied. */
public int getMinimumNumberShouldMatch() {
return minimumNumberShouldMatch;
}
@@ -174,16 +169,17 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
/**
- * Whether this query is a pure disjunction, ie. it only has SHOULD clauses
- * and it is enough for a single clause to match for this boolean query to match.
+ * Whether this query is a pure disjunction, ie. it only has SHOULD clauses and it is enough for a
+ * single clause to match for this boolean query to match.
*/
boolean isPureDisjunction() {
- return clauses.size() == getClauses(Occur.SHOULD).size()
- && minimumNumberShouldMatch <= 1;
+ return clauses.size() == getClauses(Occur.SHOULD).size() && minimumNumberShouldMatch <= 1;
}
- /** Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
- * make it possible to do:
+ /**
+ * Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface
+ * to make it possible to do:
+ *
* <pre class="prettyprint">for (BooleanClause clause : booleanQuery) {}</pre>
*/
@Override
@@ -192,8 +188,9 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
private BooleanQuery rewriteNoScoring() {
- boolean keepShould = getMinimumNumberShouldMatch() > 0
- || (clauseSets.get(Occur.MUST).size() + clauseSets.get(Occur.FILTER).size() == 0);
+ boolean keepShould =
+ getMinimumNumberShouldMatch() > 0
+ || (clauseSets.get(Occur.MUST).size() + clauseSets.get(Occur.FILTER).size() == 0);
if (clauseSets.get(Occur.MUST).size() == 0 && keepShould) {
return this;
@@ -203,19 +200,22 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
newQuery.setMinimumNumberShouldMatch(getMinimumNumberShouldMatch());
for (BooleanClause clause : clauses) {
switch (clause.getOccur()) {
- case MUST: {
- newQuery.add(clause.getQuery(), Occur.FILTER);
- break;
- }
- case SHOULD: {
- if (keepShould) {
+ case MUST:
+ {
+ newQuery.add(clause.getQuery(), Occur.FILTER);
+ break;
+ }
+ case SHOULD:
+ {
+ if (keepShould) {
+ newQuery.add(clause);
+ }
+ break;
+ }
+ default:
+ {
newQuery.add(clause);
}
- break;
- }
- default: {
- newQuery.add(clause);
- }
}
}
@@ -223,7 +223,8 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
BooleanQuery query = this;
if (scoreMode.needsScores() == false) {
query = rewriteNoScoring();
@@ -236,7 +237,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
if (clauses.size() == 0) {
return new MatchNoDocsQuery("empty BooleanQuery");
}
-
+
// optimize 1-clause queries
if (clauses.size() == 1) {
BooleanClause c = clauses.get(0);
@@ -379,9 +380,9 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
shouldClauses.put(query, shouldClauses.getOrDefault(query, 0d) + boost);
}
if (shouldClauses.size() != clauseSets.get(Occur.SHOULD).size()) {
- BooleanQuery.Builder builder = new BooleanQuery.Builder()
- .setMinimumNumberShouldMatch(minimumNumberShouldMatch);
- for (Map.Entry<Query,Double> entry : shouldClauses.entrySet()) {
+ BooleanQuery.Builder builder =
+ new BooleanQuery.Builder().setMinimumNumberShouldMatch(minimumNumberShouldMatch);
+ for (Map.Entry<Query, Double> entry : shouldClauses.entrySet()) {
Query query = entry.getKey();
float boost = entry.getValue().floatValue();
if (boost != 1f) {
@@ -411,9 +412,9 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
mustClauses.put(query, mustClauses.getOrDefault(query, 0d) + boost);
}
if (mustClauses.size() != clauseSets.get(Occur.MUST).size()) {
- BooleanQuery.Builder builder = new BooleanQuery.Builder()
- .setMinimumNumberShouldMatch(minimumNumberShouldMatch);
- for (Map.Entry<Query,Double> entry : mustClauses.entrySet()) {
+ BooleanQuery.Builder builder =
+ new BooleanQuery.Builder().setMinimumNumberShouldMatch(minimumNumberShouldMatch);
+ for (Map.Entry<Query, Double> entry : mustClauses.entrySet()) {
Query query = entry.getKey();
float boost = entry.getValue().floatValue();
if (boost != 1f) {
@@ -435,8 +436,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
{
final Collection<Query> musts = clauseSets.get(Occur.MUST);
final Collection<Query> filters = clauseSets.get(Occur.FILTER);
- if (musts.size() == 1
- && filters.size() > 0) {
+ if (musts.size() == 1 && filters.size() > 0) {
Query must = musts.iterator().next();
float boost = 1f;
if (must instanceof BoostQuery) {
@@ -466,9 +466,10 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
// now add back the SHOULD clauses
- builder = new BooleanQuery.Builder()
- .setMinimumNumberShouldMatch(getMinimumNumberShouldMatch())
- .add(rewritten, Occur.MUST);
+ builder =
+ new BooleanQuery.Builder()
+ .setMinimumNumberShouldMatch(getMinimumNumberShouldMatch())
+ .add(rewritten, Occur.MUST);
for (Query query : clauseSets.get(Occur.SHOULD)) {
builder.add(query, Occur.SHOULD);
}
@@ -515,8 +516,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
for (Query q : clauseSets.get(occur)) {
q.visit(sub);
}
- }
- else {
+ } else {
QueryVisitor v = sub.getSubVisitor(occur, this);
for (Query q : clauseSets.get(occur)) {
q.visit(v);
@@ -540,7 +540,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
buffer.append(c.getOccur().toString());
Query subQuery = c.getQuery();
- if (subQuery instanceof BooleanQuery) { // wrap sub-bools in parens
+ if (subQuery instanceof BooleanQuery) { // wrap sub-bools in parens
buffer.append("(");
buffer.append(subQuery.toString(field));
buffer.append(")");
@@ -558,7 +558,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
buffer.append(")");
}
- if (getMinimumNumberShouldMatch()>0) {
+ if (getMinimumNumberShouldMatch() > 0) {
buffer.append('~');
buffer.append(getMinimumNumberShouldMatch());
}
@@ -567,26 +567,28 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
/**
- * Compares the specified object with this boolean query for equality.
- * Returns true if and only if the provided object<ul>
- * <li>is also a {@link BooleanQuery},</li>
- * <li>has the same value of {@link #getMinimumNumberShouldMatch()}</li>
- * <li>has the same {@link Occur#SHOULD} clauses, regardless of the order</li>
- * <li>has the same {@link Occur#MUST} clauses, regardless of the order</li>
- * <li>has the same set of {@link Occur#FILTER} clauses, regardless of the
- * order and regardless of duplicates</li>
- * <li>has the same set of {@link Occur#MUST_NOT} clauses, regardless of
- * the order and regardless of duplicates</li></ul>
+ * Compares the specified object with this boolean query for equality. Returns true if and only if
+ * the provided object
+ *
+ * <ul>
+ * <li>is also a {@link BooleanQuery},
+ * <li>has the same value of {@link #getMinimumNumberShouldMatch()}
+ * <li>has the same {@link Occur#SHOULD} clauses, regardless of the order
+ * <li>has the same {@link Occur#MUST} clauses, regardless of the order
+ * <li>has the same set of {@link Occur#FILTER} clauses, regardless of the order and regardless
+ * of duplicates
+ * <li>has the same set of {@link Occur#MUST_NOT} clauses, regardless of the order and
+ * regardless of duplicates
+ * </ul>
*/
@Override
public boolean equals(Object o) {
- return sameClassAs(o) &&
- equalsTo(getClass().cast(o));
+ return sameClassAs(o) && equalsTo(getClass().cast(o));
}
private boolean equalsTo(BooleanQuery other) {
- return getMinimumNumberShouldMatch() == other.getMinimumNumberShouldMatch() &&
- clauseSets.equals(other.clauseSets);
+ return getMinimumNumberShouldMatch() == other.getMinimumNumberShouldMatch()
+ && clauseSets.equals(other.clauseSets);
}
private int computeHashCode() {
@@ -610,5 +612,4 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
assert hashCode == computeHashCode();
return hashCode;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index e1b0399..9aa25b8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -16,19 +16,17 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Objects;
-
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.PriorityQueue;
/**
- * {@link BulkScorer} that is used for pure disjunctions and disjunctions
- * that have low values of {@link BooleanQuery.Builder#setMinimumNumberShouldMatch(int)}
- * and dense clauses. This scorer scores documents by batches of 2048 docs.
+ * {@link BulkScorer} that is used for pure disjunctions and disjunctions that have low values of
+ * {@link BooleanQuery.Builder#setMinimumNumberShouldMatch(int)} and dense clauses. This scorer
+ * scores documents by batches of 2048 docs.
*/
final class BooleanScorer extends BulkScorer {
@@ -65,12 +63,13 @@ final class BooleanScorer extends BulkScorer {
// See MinShouldMatchSumScorer for an explanation
private static long cost(Collection<BulkScorer> scorers, int minShouldMatch) {
- final PriorityQueue<BulkScorer> pq = new PriorityQueue<BulkScorer>(scorers.size() - minShouldMatch + 1) {
- @Override
- protected boolean lessThan(BulkScorer a, BulkScorer b) {
- return a.cost() > b.cost();
- }
- };
+ final PriorityQueue<BulkScorer> pq =
+ new PriorityQueue<BulkScorer>(scorers.size() - minShouldMatch + 1) {
+ @Override
+ protected boolean lessThan(BulkScorer a, BulkScorer b) {
+ return a.cost() > b.cost();
+ }
+ };
for (BulkScorer scorer : scorers) {
pq.insertWithOverflow(scorer);
}
@@ -91,7 +90,6 @@ final class BooleanScorer extends BulkScorer {
protected boolean lessThan(BulkScorerAndDoc a, BulkScorerAndDoc b) {
return a.next < b.next;
}
-
}
static final class TailPriorityQueue extends PriorityQueue<BulkScorerAndDoc> {
@@ -109,7 +107,6 @@ final class BooleanScorer extends BulkScorer {
Objects.checkIndex(i, size());
return (BulkScorerAndDoc) getHeapArray()[1 + i];
}
-
}
final Bucket[] buckets = new Bucket[SIZE];
@@ -144,12 +141,18 @@ final class BooleanScorer extends BulkScorer {
final OrCollector orCollector = new OrCollector();
- BooleanScorer(BooleanWeight weight, Collection<BulkScorer> scorers, int minShouldMatch, boolean needsScores) {
+ BooleanScorer(
+ BooleanWeight weight,
+ Collection<BulkScorer> scorers,
+ int minShouldMatch,
+ boolean needsScores) {
if (minShouldMatch < 1 || minShouldMatch > scorers.size()) {
- throw new IllegalArgumentException("minShouldMatch should be within 1..num_scorers. Got " + minShouldMatch);
+ throw new IllegalArgumentException(
+ "minShouldMatch should be within 1..num_scorers. Got " + minShouldMatch);
}
if (scorers.size() <= 1) {
- throw new IllegalArgumentException("This scorer can only be used with two scorers or more, got " + scorers.size());
+ throw new IllegalArgumentException(
+ "This scorer can only be used with two scorers or more, got " + scorers.size());
}
for (int i = 0; i < buckets.length; i++) {
buckets[i] = new Bucket();
@@ -203,8 +206,15 @@ final class BooleanScorer extends BulkScorer {
}
}
- private void scoreWindowIntoBitSetAndReplay(LeafCollector collector, Bits acceptDocs,
- int base, int min, int max, BulkScorerAndDoc[] scorers, int numScorers) throws IOException {
+ private void scoreWindowIntoBitSetAndReplay(
+ LeafCollector collector,
+ Bits acceptDocs,
+ int base,
+ int min,
+ int max,
+ BulkScorerAndDoc[] scorers,
+ int numScorers)
+ throws IOException {
for (int i = 0; i < numScorers; ++i) {
final BulkScorerAndDoc scorer = scorers[i];
assert scorer.next < max;
@@ -236,7 +246,14 @@ final class BooleanScorer extends BulkScorer {
return headTop;
}
- private void scoreWindowMultipleScorers(LeafCollector collector, Bits acceptDocs, int windowBase, int windowMin, int windowMax, int maxFreq) throws IOException {
+ private void scoreWindowMultipleScorers(
+ LeafCollector collector,
+ Bits acceptDocs,
+ int windowBase,
+ int windowMin,
+ int windowMax,
+ int maxFreq)
+ throws IOException {
while (maxFreq < minShouldMatch && maxFreq + tail.size() >= minShouldMatch) {
// a match is still possible
final BulkScorerAndDoc candidate = tail.pop();
@@ -255,7 +272,8 @@ final class BooleanScorer extends BulkScorer {
}
tail.clear();
- scoreWindowIntoBitSetAndReplay(collector, acceptDocs, windowBase, windowMin, windowMax, leads, maxFreq);
+ scoreWindowIntoBitSetAndReplay(
+ collector, acceptDocs, windowBase, windowMin, windowMax, leads, maxFreq);
}
// Push back scorers into head and tail
@@ -267,8 +285,14 @@ final class BooleanScorer extends BulkScorer {
}
}
- private void scoreWindowSingleScorer(BulkScorerAndDoc bulkScorer, LeafCollector collector,
- Bits acceptDocs, int windowMin, int windowMax, int max) throws IOException {
+ private void scoreWindowSingleScorer(
+ BulkScorerAndDoc bulkScorer,
+ LeafCollector collector,
+ Bits acceptDocs,
+ int windowMin,
+ int windowMax,
+ int max)
+ throws IOException {
assert tail.size() == 0;
final int nextWindowBase = head.top().next & ~MASK;
final int end = Math.max(windowMax, Math.min(max, nextWindowBase));
@@ -279,8 +303,9 @@ final class BooleanScorer extends BulkScorer {
collector.setScorer(scoreAndDoc);
}
- private BulkScorerAndDoc scoreWindow(BulkScorerAndDoc top, LeafCollector collector,
- Bits acceptDocs, int min, int max) throws IOException {
+ private BulkScorerAndDoc scoreWindow(
+ BulkScorerAndDoc top, LeafCollector collector, Bits acceptDocs, int min, int max)
+ throws IOException {
final int windowBase = top.next & ~MASK; // find the window that the next match belongs to
final int windowMin = Math.max(min, windowBase);
final int windowMax = Math.min(max, windowBase + SIZE);
@@ -317,5 +342,4 @@ final class BooleanScorer extends BulkScorer {
return top.next;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
index 59bdd32..dffdbf7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanWeight.java
@@ -16,26 +16,22 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
-
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.Bits;
-/**
- * Expert: the Weight for BooleanQuery, used to
- * normalize, score and explain these queries.
- */
+/** Expert: the Weight for BooleanQuery, used to normalize, score and explain these queries. */
final class BooleanWeight extends Weight {
/** The Similarity implementation. */
final Similarity similarity;
+
final BooleanQuery query;
private static class WeightedBooleanClause {
@@ -51,14 +47,17 @@ final class BooleanWeight extends Weight {
final ArrayList<WeightedBooleanClause> weightedClauses;
final ScoreMode scoreMode;
- BooleanWeight(BooleanQuery query, IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ BooleanWeight(BooleanQuery query, IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
super(query);
this.query = query;
this.scoreMode = scoreMode;
this.similarity = searcher.getSimilarity();
weightedClauses = new ArrayList<>();
for (BooleanClause c : query) {
- Weight w = searcher.createWeight(c.getQuery(), c.isScoring() ? scoreMode : ScoreMode.COMPLETE_NO_SCORES, boost);
+ Weight w =
+ searcher.createWeight(
+ c.getQuery(), c.isScoring() ? scoreMode : ScoreMode.COMPLETE_NO_SCORES, boost);
weightedClauses.add(new WeightedBooleanClause(c, w));
}
}
@@ -78,10 +77,16 @@ final class BooleanWeight extends Weight {
if (c.isScoring()) {
subs.add(e);
} else if (c.isRequired()) {
- subs.add(Explanation.match(0f, "match on required clause, product of:",
- Explanation.match(0f, Occur.FILTER + " clause"), e));
+ subs.add(
+ Explanation.match(
+ 0f,
+ "match on required clause, product of:",
+ Explanation.match(0f, Occur.FILTER + " clause"),
+ e));
} else if (c.isProhibited()) {
- subs.add(Explanation.noMatch("match on prohibited clause (" + c.getQuery().toString() + ")", e));
+ subs.add(
+ Explanation.noMatch(
+ "match on prohibited clause (" + c.getQuery().toString() + ")", e));
fail = true;
}
if (!c.isProhibited()) {
@@ -91,16 +96,20 @@ final class BooleanWeight extends Weight {
shouldMatchCount++;
}
} else if (c.isRequired()) {
- subs.add(Explanation.noMatch("no match on required clause (" + c.getQuery().toString() + ")", e));
+ subs.add(
+ Explanation.noMatch(
+ "no match on required clause (" + c.getQuery().toString() + ")", e));
fail = true;
}
}
if (fail) {
- return Explanation.noMatch("Failure to meet condition(s) of required/prohibited clause(s)", subs);
+ return Explanation.noMatch(
+ "Failure to meet condition(s) of required/prohibited clause(s)", subs);
} else if (matchCount == 0) {
return Explanation.noMatch("No matching clauses", subs);
} else if (shouldMatchCount < minShouldMatch) {
- return Explanation.noMatch("Failure to match minimum number of optional clauses: " + minShouldMatch, subs);
+ return Explanation.noMatch(
+ "Failure to match minimum number of optional clauses: " + minShouldMatch, subs);
} else {
// Replicating the same floating-point errors as the scorer does is quite
// complex (essentially because of how ReqOptSumScorer casts intermediate
@@ -151,21 +160,23 @@ final class BooleanWeight extends Weight {
return new BulkScorer() {
@Override
- public int score(final LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
- final LeafCollector noScoreCollector = new LeafCollector() {
- ScoreAndDoc fake = new ScoreAndDoc();
-
- @Override
- public void setScorer(Scorable scorer) throws IOException {
- collector.setScorer(fake);
- }
-
- @Override
- public void collect(int doc) throws IOException {
- fake.doc = doc;
- collector.collect(doc);
- }
- };
+ public int score(final LeafCollector collector, Bits acceptDocs, int min, int max)
+ throws IOException {
+ final LeafCollector noScoreCollector =
+ new LeafCollector() {
+ ScoreAndDoc fake = new ScoreAndDoc();
+
+ @Override
+ public void setScorer(Scorable scorer) throws IOException {
+ collector.setScorer(fake);
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ fake.doc = doc;
+ collector.collect(doc);
+ }
+ };
return scorer.score(noScoreCollector, acceptDocs, min, max);
}
@@ -206,7 +217,8 @@ final class BooleanWeight extends Weight {
return optional.get(0);
}
- return new BooleanScorer(this, optional, Math.max(1, query.getMinimumNumberShouldMatch()), scoreMode.needsScores());
+ return new BooleanScorer(
+ this, optional, Math.max(1, query.getMinimumNumberShouldMatch()), scoreMode.needsScores());
}
// Return a BulkScorer for the required clauses only,
@@ -236,12 +248,15 @@ final class BooleanWeight extends Weight {
return scorer;
}
- /** Try to build a boolean scorer for this weight. Returns null if {@link BooleanScorer}
- * cannot be used. */
+ /**
+ * Try to build a boolean scorer for this weight. Returns null if {@link BooleanScorer} cannot be
+ * used.
+ */
BulkScorer booleanScorer(LeafReaderContext context) throws IOException {
final int numOptionalClauses = query.getClauses(Occur.SHOULD).size();
- final int numRequiredClauses = query.getClauses(Occur.MUST).size() + query.getClauses(Occur.FILTER).size();
-
+ final int numRequiredClauses =
+ query.getClauses(Occur.MUST).size() + query.getClauses(Occur.FILTER).size();
+
BulkScorer positiveScorer;
if (numRequiredClauses == 0) {
positiveScorer = optionalBulkScorer(context);
@@ -300,9 +315,10 @@ final class BooleanWeight extends Weight {
if (prohibited.isEmpty()) {
return positiveScorer;
} else {
- Scorer prohibitedScorer = prohibited.size() == 1
- ? prohibited.get(0)
- : new DisjunctionSumScorer(this, prohibited, ScoreMode.COMPLETE_NO_SCORES);
+ Scorer prohibitedScorer =
+ prohibited.size() == 1
+ ? prohibited.get(0)
+ : new DisjunctionSumScorer(this, prohibited, ScoreMode.COMPLETE_NO_SCORES);
if (prohibitedScorer.twoPhaseIterator() != null) {
// ReqExclBulkScorer can't deal efficiently with two-phased prohibited clauses
return null;
@@ -347,8 +363,7 @@ final class BooleanWeight extends Weight {
}
for (WeightedBooleanClause wc : weightedClauses) {
Weight w = wc.weight;
- if (w.isCacheable(ctx) == false)
- return false;
+ if (w.isCacheable(ctx) == false) return false;
}
return true;
}
@@ -376,15 +391,17 @@ final class BooleanWeight extends Weight {
}
// scorer simplifications:
-
+
if (scorers.get(Occur.SHOULD).size() == minShouldMatch) {
// any optional clauses are in fact required
scorers.get(Occur.MUST).addAll(scorers.get(Occur.SHOULD));
scorers.get(Occur.SHOULD).clear();
minShouldMatch = 0;
}
-
- if (scorers.get(Occur.FILTER).isEmpty() && scorers.get(Occur.MUST).isEmpty() && scorers.get(Occur.SHOULD).isEmpty()) {
+
+ if (scorers.get(Occur.FILTER).isEmpty()
+ && scorers.get(Occur.MUST).isEmpty()
+ && scorers.get(Occur.SHOULD).isEmpty()) {
// no required and optional clauses.
return null;
} else if (scorers.get(Occur.SHOULD).size() < minShouldMatch) {
@@ -396,5 +413,4 @@ final class BooleanWeight extends Weight {
return new Boolean2ScorerSupplier(this, scorers, scoreMode, minShouldMatch);
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java b/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
index 9030b57..0a1570e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
@@ -16,19 +16,21 @@
*/
package org.apache.lucene.search;
-
+import org.apache.lucene.index.Terms; // javadocs only
+import org.apache.lucene.index.TermsEnum; // javadocs only
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeSource; // javadocs only
-import org.apache.lucene.index.TermsEnum; // javadocs only
-import org.apache.lucene.index.Terms; // javadocs only
-/** Add this {@link Attribute} to a {@link TermsEnum} returned by {@link MultiTermQuery#getTermsEnum(Terms,AttributeSource)}
- * and update the boost on each returned term. This enables to control the boost factor
- * for each matching term in {@link MultiTermQuery#SCORING_BOOLEAN_REWRITE} or
- * {@link TopTermsRewrite} mode.
- * {@link FuzzyQuery} is using this to take the edit distance into account.
- * <p><b>Please note:</b> This attribute is intended to be added only by the TermsEnum
- * to itself in its constructor and consumed by the {@link MultiTermQuery.RewriteMethod}.
+/**
+ * Add this {@link Attribute} to a {@link TermsEnum} returned by {@link
+ * MultiTermQuery#getTermsEnum(Terms,AttributeSource)} and update the boost on each returned term.
+ * This enables to control the boost factor for each matching term in {@link
+ * MultiTermQuery#SCORING_BOOLEAN_REWRITE} or {@link TopTermsRewrite} mode. {@link FuzzyQuery} is
+ * using this to take the edit distance into account.
+ *
+ * <p><b>Please note:</b> This attribute is intended to be added only by the TermsEnum to itself in
+ * its constructor and consumed by the {@link MultiTermQuery.RewriteMethod}.
+ *
* @lucene.internal
*/
public interface BoostAttribute extends Attribute {
diff --git a/lucene/core/src/java/org/apache/lucene/search/BoostAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/search/BoostAttributeImpl.java
index 9eced23..f7f218e3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BoostAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BoostAttributeImpl.java
@@ -16,11 +16,12 @@
*/
package org.apache.lucene.search;
-
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
-/** Implementation class for {@link BoostAttribute}.
+/**
+ * Implementation class for {@link BoostAttribute}.
+ *
* @lucene.internal
*/
public final class BoostAttributeImpl extends AttributeImpl implements BoostAttribute {
@@ -30,7 +31,7 @@ public final class BoostAttributeImpl extends AttributeImpl implements BoostAttr
public void setBoost(float boost) {
this.boost = boost;
}
-
+
@Override
public float getBoost() {
return boost;
@@ -40,7 +41,7 @@ public final class BoostAttributeImpl extends AttributeImpl implements BoostAttr
public void clear() {
boost = 1.0f;
}
-
+
@Override
public void copyTo(AttributeImpl target) {
((BoostAttribute) target).setBoost(boost);
diff --git a/lucene/core/src/java/org/apache/lucene/search/BoostQuery.java b/lucene/core/src/java/org/apache/lucene/search/BoostQuery.java
index 5f04363..172ea00 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BoostQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BoostQuery.java
@@ -16,28 +16,26 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Objects;
-
import org.apache.lucene.index.IndexReader;
/**
- * A {@link Query} wrapper that allows to give a boost to the wrapped query.
- * Boost values that are less than one will give less importance to this
- * query compared to other ones while values that are greater than one will
- * give more importance to the scores returned by this query.
+ * A {@link Query} wrapper that allows to give a boost to the wrapped query. Boost values that are
+ * less than one will give less importance to this query compared to other ones while values that
+ * are greater than one will give more importance to the scores returned by this query.
*
- * More complex boosts can be applied by using FunctionScoreQuery in the
- * lucene-queries module
+ * <p>More complex boosts can be applied by using FunctionScoreQuery in the lucene-queries module
*/
public final class BoostQuery extends Query {
private final Query query;
private final float boost;
- /** Sole constructor: wrap {@code query} in such a way that the produced
- * scores will be boosted by {@code boost}. */
+ /**
+ * Sole constructor: wrap {@code query} in such a way that the produced scores will be boosted by
+ * {@code boost}.
+ */
public BoostQuery(Query query, float boost) {
this.query = Objects.requireNonNull(query);
if (Float.isFinite(boost) == false || Float.compare(boost, 0f) < 0) {
@@ -46,29 +44,24 @@ public final class BoostQuery extends Query {
this.boost = boost;
}
- /**
- * Return the wrapped {@link Query}.
- */
+ /** Return the wrapped {@link Query}. */
public Query getQuery() {
return query;
}
- /**
- * Return the applied boost.
- */
+ /** Return the applied boost. */
public float getBoost() {
return boost;
}
@Override
public boolean equals(Object other) {
- return sameClassAs(other) &&
- equalsTo(getClass().cast(other));
+ return sameClassAs(other) && equalsTo(getClass().cast(other));
}
-
+
private boolean equalsTo(BoostQuery other) {
- return query.equals(other.query) &&
- Float.floatToIntBits(boost) == Float.floatToIntBits(other.boost);
+ return query.equals(other.query)
+ && Float.floatToIntBits(boost) == Float.floatToIntBits(other.boost);
}
@Override
@@ -121,8 +114,8 @@ public final class BoostQuery extends Query {
}
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
return query.createWeight(searcher, scoreMode, BoostQuery.this.boost * boost);
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
index 661c129..fb7805a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BulkScorer.java
@@ -16,24 +16,23 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.util.Bits;
-/** This class is used to score a range of documents at
- * once, and is returned by {@link Weight#bulkScorer}. Only
- * queries that have a more optimized means of scoring
- * across a range of documents need to override this.
- * Otherwise, a default implementation is wrapped around
- * the {@link Scorer} returned by {@link Weight#scorer}. */
-
+/**
+ * This class is used to score a range of documents at once, and is returned by {@link
+ * Weight#bulkScorer}. Only queries that have a more optimized means of scoring across a range of
+ * documents need to override this. Otherwise, a default implementation is wrapped around the {@link
+ * Scorer} returned by {@link Weight#scorer}.
+ */
public abstract class BulkScorer {
- /** Scores and collects all matching documents.
+ /**
+ * Scores and collects all matching documents.
+ *
* @param collector The collector to which all matching documents are passed.
- * @param acceptDocs {@link Bits} that represents the allowed documents to match, or
- * {@code null} if they are all allowed to match.
+ * @param acceptDocs {@link Bits} that represents the allowed documents to match, or {@code null}
+ * if they are all allowed to match.
*/
public void score(LeafCollector collector, Bits acceptDocs) throws IOException {
final int next = score(collector, acceptDocs, 0, DocIdSetIterator.NO_MORE_DOCS);
@@ -41,20 +40,25 @@ public abstract class BulkScorer {
}
/**
- * Collects matching documents in a range and return an estimation of the
- * next matching document which is on or after {@code max}.
- * <p>The return value must be:</p><ul>
- * <li>>= {@code max},</li>
- * <li>{@link DocIdSetIterator#NO_MORE_DOCS} if there are no more matches,</li>
- * <li><= the first matching document that is >= {@code max} otherwise.</li>
+ * Collects matching documents in a range and return an estimation of the next matching document
+ * which is on or after {@code max}.
+ *
+ * <p>The return value must be:
+ *
+ * <ul>
+ * <li>>= {@code max},
+ * <li>{@link DocIdSetIterator#NO_MORE_DOCS} if there are no more matches,
+ * <li><= the first matching document that is >= {@code max} otherwise.
* </ul>
- * <p>{@code min} is the minimum document to be considered for matching. All
- * documents strictly before this value must be ignored.</p>
- * <p>Although {@code max} would be a legal return value for this method, higher
- * values might help callers skip more efficiently over non-matching portions
- * of the docID space.</p>
- * <p>For instance, a {@link Scorer}-based implementation could look like
- * below:</p>
+ *
+ * <p>{@code min} is the minimum document to be considered for matching. All documents strictly
+ * before this value must be ignored.
+ *
+ * <p>Although {@code max} would be a legal return value for this method, higher values might help
+ * callers skip more efficiently over non-matching portions of the docID space.
+ *
+ * <p>For instance, a {@link Scorer}-based implementation could look like below:
+ *
* <pre class="prettyprint">
* private final Scorer scorer; // set via constructor
*
@@ -74,17 +78,16 @@ public abstract class BulkScorer {
* }
* </pre>
*
- * @param collector The collector to which all matching documents are passed.
- * @param acceptDocs {@link Bits} that represents the allowed documents to match, or
- * {@code null} if they are all allowed to match.
- * @param min Score starting at, including, this document
- * @param max Score up to, but not including, this doc
+ * @param collector The collector to which all matching documents are passed.
+ * @param acceptDocs {@link Bits} that represents the allowed documents to match, or {@code null}
+ * if they are all allowed to match.
+ * @param min Score starting at, including, this document
+ * @param max Score up to, but not including, this doc
* @return an under-estimation of the next matching doc after max
*/
- public abstract int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException;
+ public abstract int score(LeafCollector collector, Bits acceptDocs, int min, int max)
+ throws IOException;
- /**
- * Same as {@link DocIdSetIterator#cost()} for bulk scorers.
- */
+ /** Same as {@link DocIdSetIterator#cost()} for bulk scorers. */
public abstract long cost();
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index bc1c185..898cf73 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -19,27 +19,21 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.ArrayUtil;
/**
- * Caches all docs, and optionally also scores, coming from
- * a search, and is then able to replay them to another
- * collector. You specify the max RAM this class may use.
- * Once the collection is done, call {@link #isCached}. If
- * this returns true, you can use {@link #replay(Collector)}
- * against a new collector. If it returns false, this means
- * too much RAM was required and you must instead re-run the
- * original search.
+ * Caches all docs, and optionally also scores, coming from a search, and is then able to replay
+ * them to another collector. You specify the max RAM this class may use. Once the collection is
+ * done, call {@link #isCached}. If this returns true, you can use {@link #replay(Collector)}
+ * against a new collector. If it returns false, this means too much RAM was required and you must
+ * instead re-run the original search.
*
- * <p><b>NOTE</b>: this class consumes 4 (or 8 bytes, if
- * scoring is cached) per collected document. If the result
- * set is large this can easily be a very substantial amount
- * of RAM!
+ * <p><b>NOTE</b>: this class consumes 4 (or 8 bytes, if scoring is cached) per collected document.
+ * If the result set is large this can easily be a very substantial amount of RAM!
*
- * <p>See the Lucene <code>modules/grouping</code> module for more
- * details including a full code example.</p>
+ * <p>See the Lucene <code>modules/grouping</code> module for more details including a full code
+ * example.
*
* @lucene.experimental
*/
@@ -57,13 +51,14 @@ public abstract class CachingCollector extends FilterCollector {
float score;
@Override
- public final float score() { return score; }
+ public final float score() {
+ return score;
+ }
@Override
public int docID() {
return doc;
}
-
}
private static class NoScoreCachingCollector extends CachingCollector {
@@ -84,7 +79,8 @@ public abstract class CachingCollector extends FilterCollector {
return new NoScoreCachingLeafCollector(in, maxDocsToCache);
}
- // note: do *not* override needScore to say false. Just because we aren't caching the score doesn't mean the
+ // note: do *not* override needScore to say false. Just because we aren't caching the score
+ // doesn't mean the
// wrapped collector doesn't need it to do its job.
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
@@ -133,7 +129,8 @@ public abstract class CachingCollector extends FilterCollector {
public void replay(Collector other) throws IOException {
postCollection();
if (!isCached()) {
- throw new IllegalStateException("cannot replay: cache was cleared because too much RAM was required");
+ throw new IllegalStateException(
+ "cannot replay: cache was cleared because too much RAM was required");
}
assert docs.size() == contexts.size();
for (int i = 0; i < contexts.size(); ++i) {
@@ -142,7 +139,6 @@ public abstract class CachingCollector extends FilterCollector {
collect(collector, i);
}
}
-
}
private static class ScoreCachingCollector extends NoScoreCachingCollector {
@@ -165,7 +161,10 @@ public abstract class CachingCollector extends FilterCollector {
scores.add(coll.cachedScores());
}
- /** Ensure the scores are collected so they can be replayed, even if the wrapped collector doesn't need them. */
+ /**
+ * Ensure the scores are collected so they can be replayed, even if the wrapped collector
+ * doesn't need them.
+ */
@Override
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
@@ -220,7 +219,8 @@ public abstract class CachingCollector extends FilterCollector {
if (docCount >= maxDocsToCache) {
invalidate();
} else {
- final int newLen = Math.min(ArrayUtil.oversize(docCount + 1, Integer.BYTES), maxDocsToCache);
+ final int newLen =
+ Math.min(ArrayUtil.oversize(docCount + 1, Integer.BYTES), maxDocsToCache);
grow(newLen);
}
}
@@ -239,7 +239,6 @@ public abstract class CachingCollector extends FilterCollector {
int[] cachedDocs() {
return docs == null ? null : ArrayUtil.copyOfSubArray(docs, 0, docCount);
}
-
}
private class ScoreCachingLeafCollector extends NoScoreCachingLeafCollector {
@@ -282,38 +281,33 @@ public abstract class CachingCollector extends FilterCollector {
}
/**
- * Creates a {@link CachingCollector} which does not wrap another collector.
- * The cached documents and scores can later be {@link #replay(Collector)
- * replayed}.
+ * Creates a {@link CachingCollector} which does not wrap another collector. The cached documents
+ * and scores can later be {@link #replay(Collector) replayed}.
*/
public static CachingCollector create(boolean cacheScores, double maxRAMMB) {
- Collector other = new SimpleCollector() {
+ Collector other =
+ new SimpleCollector() {
- @Override
- public void collect(int doc) {}
+ @Override
+ public void collect(int doc) {}
- @Override
- public ScoreMode scoreMode() {
- return ScoreMode.COMPLETE;
- }
-
- };
+ @Override
+ public ScoreMode scoreMode() {
+ return ScoreMode.COMPLETE;
+ }
+ };
return create(other, cacheScores, maxRAMMB);
}
/**
- * Create a new {@link CachingCollector} that wraps the given collector and
- * caches documents and scores up to the specified RAM threshold.
+ * Create a new {@link CachingCollector} that wraps the given collector and caches documents and
+ * scores up to the specified RAM threshold.
*
- * @param other
- * the Collector to wrap and delegate calls to.
- * @param cacheScores
- * whether to cache scores in addition to document IDs. Note that
- * this increases the RAM consumed per doc
- * @param maxRAMMB
- * the maximum RAM in MB to consume for caching the documents and
- * scores. If the collector exceeds the threshold, no documents and
- * scores are cached.
+ * @param other the Collector to wrap and delegate calls to.
+ * @param cacheScores whether to cache scores in addition to document IDs. Note that this
+ * increases the RAM consumed per doc
+ * @param maxRAMMB the maximum RAM in MB to consume for caching the documents and scores. If the
+ * collector exceeds the threshold, no documents and scores are cached.
*/
public static CachingCollector create(Collector other, boolean cacheScores, double maxRAMMB) {
int bytesPerDoc = Integer.BYTES;
@@ -325,21 +319,19 @@ public abstract class CachingCollector extends FilterCollector {
}
/**
- * Create a new {@link CachingCollector} that wraps the given collector and
- * caches documents and scores up to the specified max docs threshold.
+ * Create a new {@link CachingCollector} that wraps the given collector and caches documents and
+ * scores up to the specified max docs threshold.
*
- * @param other
- * the Collector to wrap and delegate calls to.
- * @param cacheScores
- * whether to cache scores in addition to document IDs. Note that
- * this increases the RAM consumed per doc
- * @param maxDocsToCache
- * the maximum number of documents for caching the documents and
- * possible the scores. If the collector exceeds the threshold,
- * no documents and scores are cached.
+ * @param other the Collector to wrap and delegate calls to.
+ * @param cacheScores whether to cache scores in addition to document IDs. Note that this
+ * increases the RAM consumed per doc
+ * @param maxDocsToCache the maximum number of documents for caching the documents and possibly
+ * the scores. If the collector exceeds the threshold, no documents and scores are cached.
*/
public static CachingCollector create(Collector other, boolean cacheScores, int maxDocsToCache) {
- return cacheScores ? new ScoreCachingCollector(other, maxDocsToCache) : new NoScoreCachingCollector(other, maxDocsToCache);
+ return cacheScores
+ ? new ScoreCachingCollector(other, maxDocsToCache)
+ : new NoScoreCachingCollector(other, maxDocsToCache);
}
private boolean cached;
@@ -349,25 +341,19 @@ public abstract class CachingCollector extends FilterCollector {
cached = true;
}
- /**
- * Return true is this collector is able to replay collection.
- */
+ /** Return true if this collector is able to replay collection. */
public final boolean isCached() {
return cached;
}
/**
- * Replays the cached doc IDs (and scores) to the given Collector. If this
- * instance does not cache scores, then Scorer is not set on
- * {@code other.setScorer} as well as scores are not replayed.
+ * Replays the cached doc IDs (and scores) to the given Collector. If this instance does not cache
+ * scores, then no Scorer is set via {@code other.setScorer} and scores are not replayed.
*
- * @throws IllegalStateException
- * if this collector is not cached (i.e., if the RAM limits were too
- * low for the number of documents + scores to cache).
- * @throws IllegalArgumentException
- * if the given Collect's does not support out-of-order collection,
- * while the collector passed to the ctor does.
+ * @throws IllegalStateException if this collector is not cached (i.e., if the RAM limits were too
+ * low for the number of documents + scores to cache).
+ * @throws IllegalArgumentException if the given Collector does not support out-of-order
+ * collection, while the collector passed to the ctor does.
*/
public abstract void replay(Collector other) throws IOException;
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java b/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
index 2dac059..d424abc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
@@ -17,33 +17,34 @@
package org.apache.lucene.search;
import java.util.Objects;
-
import org.apache.lucene.index.IndexReader; // javadocs
-import org.apache.lucene.index.Terms; // javadocs
-
+import org.apache.lucene.index.Terms; // javadocs
/**
* Contains statistics for a collection (field).
- * <p>
- * This class holds statistics across all documents for scoring purposes:
+ *
+ * <p>This class holds statistics across all documents for scoring purposes:
+ *
* <ul>
- * <li> {@link #maxDoc()}: number of documents.
- * <li> {@link #docCount()}: number of documents that contain this field.
- * <li> {@link #sumDocFreq()}: number of postings-list entries.
- * <li> {@link #sumTotalTermFreq()}: number of tokens.
+ * <li>{@link #maxDoc()}: number of documents.
+ * <li>{@link #docCount()}: number of documents that contain this field.
+ * <li>{@link #sumDocFreq()}: number of postings-list entries.
+ * <li>{@link #sumTotalTermFreq()}: number of tokens.
* </ul>
- * <p>
- * The following conditions are always true:
+ *
+ * <p>The following conditions are always true:
+ *
* <ul>
- * <li> All statistics are positive integers: never zero or negative.
- * <li> {@code docCount} <= {@code maxDoc}
- * <li> {@code docCount} <= {@code sumDocFreq} <= {@code sumTotalTermFreq}
+ * <li>All statistics are positive integers: never zero or negative.
+ * <li>{@code docCount} <= {@code maxDoc}
+ * <li>{@code docCount} <= {@code sumDocFreq} <= {@code sumTotalTermFreq}
* </ul>
- * <p>
- * Values may include statistics on deleted documents that have not yet been merged away.
- * <p>
- * Be careful when performing calculations on these values because they are represented
- * as 64-bit integer values, you may need to cast to {@code double} for your use.
+ *
+ * <p>Values may include statistics on deleted documents that have not yet been merged away.
+ *
+ * <p>Be careful when performing calculations on these values because they are represented as 64-bit
+ * integer values, you may need to cast to {@code double} for your use.
+ *
* @lucene.experimental
*/
public class CollectionStatistics {
@@ -52,9 +53,10 @@ public class CollectionStatistics {
private final long docCount;
private final long sumTotalTermFreq;
private final long sumDocFreq;
-
+
/**
* Creates statistics instance for a collection (field).
+ *
* @param field Field's name
* @param maxDoc total number of documents.
* @param docCount number of documents containing the field.
@@ -66,7 +68,8 @@ public class CollectionStatistics {
* @throws IllegalArgumentException if {@code sumDocFreq} is less than {@code docCount}.
* @throws IllegalArgumentException if {@code sumTotalTermFreq} is less than {@code sumDocFreq}.
*/
- public CollectionStatistics(String field, long maxDoc, long docCount, long sumTotalTermFreq, long sumDocFreq) {
+ public CollectionStatistics(
+ String field, long maxDoc, long docCount, long sumTotalTermFreq, long sumDocFreq) {
Objects.requireNonNull(field);
if (maxDoc <= 0) {
throw new IllegalArgumentException("maxDoc must be positive, maxDoc: " + maxDoc);
@@ -75,19 +78,29 @@ public class CollectionStatistics {
throw new IllegalArgumentException("docCount must be positive, docCount: " + docCount);
}
if (docCount > maxDoc) {
- throw new IllegalArgumentException("docCount must not exceed maxDoc, docCount: " + docCount + ", maxDoc: " + maxDoc);
+ throw new IllegalArgumentException(
+ "docCount must not exceed maxDoc, docCount: " + docCount + ", maxDoc: " + maxDoc);
}
if (sumDocFreq <= 0) {
throw new IllegalArgumentException("sumDocFreq must be positive, sumDocFreq: " + sumDocFreq);
}
if (sumDocFreq < docCount) {
- throw new IllegalArgumentException("sumDocFreq must be at least docCount, sumDocFreq: " + sumDocFreq + ", docCount: " + docCount);
+ throw new IllegalArgumentException(
+ "sumDocFreq must be at least docCount, sumDocFreq: "
+ + sumDocFreq
+ + ", docCount: "
+ + docCount);
}
if (sumTotalTermFreq <= 0) {
- throw new IllegalArgumentException("sumTotalTermFreq must be positive, sumTotalTermFreq: " + sumTotalTermFreq);
+ throw new IllegalArgumentException(
+ "sumTotalTermFreq must be positive, sumTotalTermFreq: " + sumTotalTermFreq);
}
if (sumTotalTermFreq < sumDocFreq) {
- throw new IllegalArgumentException("sumTotalTermFreq must be at least sumDocFreq, sumTotalTermFreq: " + sumTotalTermFreq + ", sumDocFreq: " + sumDocFreq);
+ throw new IllegalArgumentException(
+ "sumTotalTermFreq must be at least sumDocFreq, sumTotalTermFreq: "
+ + sumTotalTermFreq
+ + ", sumDocFreq: "
+ + sumDocFreq);
}
this.field = field;
this.maxDoc = maxDoc;
@@ -95,64 +108,67 @@ public class CollectionStatistics {
this.sumTotalTermFreq = sumTotalTermFreq;
this.sumDocFreq = sumDocFreq;
}
-
+
/**
* The field's name.
- * <p>
- * This value is never {@code null}.
+ *
+ * <p>This value is never {@code null}.
+ *
* @return field's name, not {@code null}
*/
public final String field() {
return field;
}
-
+
/**
- * The total number of documents, regardless of
- * whether they all contain values for this field.
- * <p>
- * This value is always a positive number.
+ * The total number of documents, regardless of whether they all contain values for this field.
+ *
+ * <p>This value is always a positive number.
+ *
* @return total number of documents, in the range [1 .. {@link Long#MAX_VALUE}]
* @see IndexReader#maxDoc()
*/
public final long maxDoc() {
return maxDoc;
}
-
+
/**
- * The total number of documents that have at least
- * one term for this field.
- * <p>
- * This value is always a positive number, and never
- * exceeds {@link #maxDoc()}.
+ * The total number of documents that have at least one term for this field.
+ *
+ * <p>This value is always a positive number, and never exceeds {@link #maxDoc()}.
+ *
* @return total number of documents containing this field, in the range [1 .. {@link #maxDoc()}]
* @see Terms#getDocCount()
*/
public final long docCount() {
return docCount;
}
-
+
/**
- * The total number of tokens for this field.
- * This is the "word count" for this field across all documents.
- * It is the sum of {@link TermStatistics#totalTermFreq()} across all terms.
- * It is also the sum of each document's field length across all documents.
- * <p>
- * This value is always a positive number, and always at least {@link #sumDocFreq()}.
- * @return total number of tokens in the field, in the range [{@link #sumDocFreq()} .. {@link Long#MAX_VALUE}]
+ * The total number of tokens for this field. This is the "word count" for this field across all
+ * documents. It is the sum of {@link TermStatistics#totalTermFreq()} across all terms. It is also
+ * the sum of each document's field length across all documents.
+ *
+ * <p>This value is always a positive number, and always at least {@link #sumDocFreq()}.
+ *
+ * @return total number of tokens in the field, in the range [{@link #sumDocFreq()} .. {@link
+ * Long#MAX_VALUE}]
* @see Terms#getSumTotalTermFreq()
*/
public final long sumTotalTermFreq() {
return sumTotalTermFreq;
}
-
+
/**
- * The total number of posting list entries for this field.
- * This is the sum of term-document pairs: the sum of {@link TermStatistics#docFreq()} across all terms.
- * It is also the sum of each document's unique term count for this field across all documents.
- * <p>
- * This value is always a positive number, always at least {@link #docCount()}, and never
+ * The total number of posting list entries for this field. This is the sum of term-document
+ * pairs: the sum of {@link TermStatistics#docFreq()} across all terms. It is also the sum of each
+ * document's unique term count for this field across all documents.
+ *
+ * <p>This value is always a positive number, always at least {@link #docCount()}, and never
* exceeds {@link #sumTotalTermFreq()}.
- * @return number of posting list entries, in the range [{@link #docCount()} .. {@link #sumTotalTermFreq()}]
+ *
+ * @return number of posting list entries, in the range [{@link #docCount()} .. {@link
+ * #sumTotalTermFreq()}]
* @see Terms#getSumDocFreq()
*/
public final long sumDocFreq() {
diff --git a/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java b/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
index e0a874e..2a7e044 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CollectionTerminatedException.java
@@ -16,13 +16,14 @@
*/
package org.apache.lucene.search;
-
-/** Throw this exception in {@link LeafCollector#collect(int)} to prematurely
- * terminate collection of the current leaf.
- * <p>Note: IndexSearcher swallows this exception and never re-throws it.
- * As a consequence, you should not catch it when calling
- * {@link IndexSearcher#search} as it is unnecessary and might hide misuse
- * of this exception. */
+/**
+ * Throw this exception in {@link LeafCollector#collect(int)} to prematurely terminate collection of
+ * the current leaf.
+ *
+ * <p>Note: IndexSearcher swallows this exception and never re-throws it. As a consequence, you
+ * should not catch it when calling {@link IndexSearcher#search} as it is unnecessary and might hide
+ * misuse of this exception.
+ */
@SuppressWarnings("serial")
public final class CollectionTerminatedException extends RuntimeException {
@@ -30,5 +31,4 @@ public final class CollectionTerminatedException extends RuntimeException {
public CollectionTerminatedException() {
super();
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index 9818c67..3af2210 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -16,49 +16,31 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.index.LeafReaderContext;
/**
- * <p>Expert: Collectors are primarily meant to be used to
- * gather raw results from a search, and implement sorting
- * or custom result filtering, collation, etc. </p>
+ * Expert: Collectors are primarily meant to be used to gather raw results from a search, and
+ * implement sorting or custom result filtering, collation, etc.
*
- * <p>Lucene's core collectors are derived from {@link Collector}
- * and {@link SimpleCollector}. Likely your application can
- * use one of these classes, or subclass {@link TopDocsCollector},
+ * <p>Lucene's core collectors are derived from {@link Collector} and {@link SimpleCollector}.
+ * Likely your application can use one of these classes, or subclass {@link TopDocsCollector},
* instead of implementing Collector directly:
*
* <ul>
- *
- * <li>{@link TopDocsCollector} is an abstract base class
- * that assumes you will retrieve the top N docs,
- * according to some criteria, after collection is
- * done. </li>
- *
- * <li>{@link TopScoreDocCollector} is a concrete subclass
- * {@link TopDocsCollector} and sorts according to score +
- * docID. This is used internally by the {@link
- * IndexSearcher} search methods that do not take an
- * explicit {@link Sort}. It is likely the most frequently
- * used collector.</li>
- *
- * <li>{@link TopFieldCollector} subclasses {@link
- * TopDocsCollector} and sorts according to a specified
- * {@link Sort} object (sort by field). This is used
- * internally by the {@link IndexSearcher} search methods
- * that take an explicit {@link Sort}.
- *
- * <li>{@link TimeLimitingCollector}, which wraps any other
- * Collector and aborts the search if it's taken too much
- * time.</li>
- *
- * <li>{@link PositiveScoresOnlyCollector} wraps any other
- * Collector and prevents collection of hits whose score
- * is <= 0.0</li>
- *
+ * <li>{@link TopDocsCollector} is an abstract base class that assumes you will retrieve the top N
+ * docs, according to some criteria, after collection is done.
+ * <li>{@link TopScoreDocCollector} is a concrete subclass of {@link TopDocsCollector} and sorts
+ * according to score + docID. This is used internally by the {@link IndexSearcher} search
+ * methods that do not take an explicit {@link Sort}. It is likely the most frequently used
+ * collector.
+ * <li>{@link TopFieldCollector} subclasses {@link TopDocsCollector} and sorts according to a
+ * specified {@link Sort} object (sort by field). This is used internally by the {@link
+ * IndexSearcher} search methods that take an explicit {@link Sort}.
+ * <li>{@link TimeLimitingCollector}, which wraps any other Collector and aborts the search if
+ * it's taken too much time.
+ * <li>{@link PositiveScoresOnlyCollector} wraps any other Collector and prevents collection of
+ * hits whose score is <= 0.0
* </ul>
*
* @lucene.experimental
@@ -68,13 +50,10 @@ public interface Collector {
/**
* Create a new {@link LeafCollector collector} to collect the given context.
*
- * @param context
- * next atomic reader context
+ * @param context next atomic reader context
*/
LeafCollector getLeafCollector(LeafReaderContext context) throws IOException;
-
- /**
- * Indicates what features are required from the scorer.
- */
+
+ /** Indicates what features are required from the scorer. */
ScoreMode scoreMode();
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/CollectorManager.java b/lucene/core/src/java/org/apache/lucene/search/CollectorManager.java
index 6a2e9a6..d9969e0 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CollectorManager.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CollectorManager.java
@@ -16,41 +16,34 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Collection;
/**
- * A manager of collectors. This class is useful to parallelize execution of
- * search requests and has two main methods:
+ * A manager of collectors. This class is useful to parallelize execution of search requests and has
+ * two main methods:
+ *
* <ul>
- * <li>{@link #newCollector()} which must return a NEW collector which
- * will be used to collect a certain set of leaves.</li>
- * <li>{@link #reduce(Collection)} which will be used to reduce the
- * results of individual collections into a meaningful result.
- * This method is only called after all leaves have been fully
- * collected.</li>
+ * <li>{@link #newCollector()} which must return a NEW collector which will be used to collect a
+ * certain set of leaves.
+ * <li>{@link #reduce(Collection)} which will be used to reduce the results of individual
+ * collections into a meaningful result. This method is only called after all leaves have been
+ * fully collected.
* </ul>
*
* @see IndexSearcher#search(Query, CollectorManager)
* @lucene.experimental
*/
public interface CollectorManager<C extends Collector, T> {
-
- /**
- * Return a new {@link Collector}. This must return a different instance on
- * each call.
- */
+
+ /** Return a new {@link Collector}. This must return a different instance on each call. */
C newCollector() throws IOException;
/**
- * Reduce the results of individual collectors into a meaningful result.
- * For instance a {@link TopDocsCollector} would compute the
- * {@link TopDocsCollector#topDocs() top docs} of each collector and then
- * merge them using {@link TopDocs#merge(int, TopDocs[])}.
- * This method must be called after collection is finished on all provided
- * collectors.
+ * Reduce the results of individual collectors into a meaningful result. For instance a {@link
+ * TopDocsCollector} would compute the {@link TopDocsCollector#topDocs() top docs} of each
+ * collector and then merge them using {@link TopDocs#merge(int, TopDocs[])}. This method must be
+ * called after collection is finished on all provided collectors.
*/
T reduce(Collection<C> collectors) throws IOException;
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
index 30bdabb..9fb778d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionDISI.java
@@ -16,32 +16,33 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
-
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.CollectionUtil;
-/** A conjunction of DocIdSetIterators.
- * Requires that all of its sub-iterators must be on the same document all the time.
- * This iterates over the doc ids that are present in each given DocIdSetIterator.
- * <br>Public only for use in {@link org.apache.lucene.search.spans}.
+/**
+ * A conjunction of DocIdSetIterators. Requires that all of its sub-iterators must be on the same
+ * document all the time. This iterates over the doc ids that are present in each given
+ * DocIdSetIterator. <br>
+ * Public only for use in {@link org.apache.lucene.search.spans}.
+ *
* @lucene.internal
*/
public final class ConjunctionDISI extends DocIdSetIterator {
- /** Create a conjunction over the provided {@link Scorer}s. Note that the
- * returned {@link DocIdSetIterator} might leverage two-phase iteration in
- * which case it is possible to retrieve the {@link TwoPhaseIterator} using
- * {@link TwoPhaseIterator#unwrap}. */
+ /**
+ * Create a conjunction over the provided {@link Scorer}s. Note that the returned {@link
+ * DocIdSetIterator} might leverage two-phase iteration in which case it is possible to retrieve
+ * the {@link TwoPhaseIterator} using {@link TwoPhaseIterator#unwrap}.
+ */
public static DocIdSetIterator intersectScorers(Collection<Scorer> scorers) {
if (scorers.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
@@ -55,10 +56,11 @@ public final class ConjunctionDISI extends DocIdSetIterator {
return createConjunction(allIterators, twoPhaseIterators);
}
- /** Create a conjunction over the provided DocIdSetIterators. Note that the
- * returned {@link DocIdSetIterator} might leverage two-phase iteration in
- * which case it is possible to retrieve the {@link TwoPhaseIterator} using
- * {@link TwoPhaseIterator#unwrap}. */
+ /**
+ * Create a conjunction over the provided DocIdSetIterators. Note that the returned {@link
+ * DocIdSetIterator} might leverage two-phase iteration in which case it is possible to retrieve
+ * the {@link TwoPhaseIterator} using {@link TwoPhaseIterator#unwrap}.
+ */
public static DocIdSetIterator intersectIterators(List<DocIdSetIterator> iterators) {
if (iterators.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
@@ -72,10 +74,11 @@ public final class ConjunctionDISI extends DocIdSetIterator {
return createConjunction(allIterators, twoPhaseIterators);
}
- /** Create a conjunction over the provided {@link Spans}. Note that the
- * returned {@link DocIdSetIterator} might leverage two-phase iteration in
- * which case it is possible to retrieve the {@link TwoPhaseIterator} using
- * {@link TwoPhaseIterator#unwrap}. */
+ /**
+ * Create a conjunction over the provided {@link Spans}. Note that the returned {@link
+ * DocIdSetIterator} might leverage two-phase iteration in which case it is possible to retrieve
+ * the {@link TwoPhaseIterator} using {@link TwoPhaseIterator#unwrap}.
+ */
public static DocIdSetIterator intersectSpans(List<Spans> spanList) {
if (spanList.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
@@ -89,8 +92,14 @@ public final class ConjunctionDISI extends DocIdSetIterator {
return createConjunction(allIterators, twoPhaseIterators);
}
- /** Adds the scorer, possibly splitting up into two phases or collapsing if it is another conjunction */
- private static void addScorer(Scorer scorer, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
+ /**
+ * Adds the scorer, possibly splitting up into two phases or collapsing if it is another
+ * conjunction
+ */
+ private static void addScorer(
+ Scorer scorer,
+ List<DocIdSetIterator> allIterators,
+ List<TwoPhaseIterator> twoPhaseIterators) {
TwoPhaseIterator twoPhaseIter = scorer.twoPhaseIterator();
if (twoPhaseIter != null) {
addTwoPhaseIterator(twoPhaseIter, allIterators, twoPhaseIterators);
@@ -100,7 +109,8 @@ public final class ConjunctionDISI extends DocIdSetIterator {
}
/** Adds the Spans. */
- private static void addSpans(Spans spans, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
+ private static void addSpans(
+ Spans spans, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
TwoPhaseIterator twoPhaseIter = spans.asTwoPhaseIterator();
if (twoPhaseIter != null) {
addTwoPhaseIterator(twoPhaseIter, allIterators, twoPhaseIterators);
@@ -109,13 +119,18 @@ public final class ConjunctionDISI extends DocIdSetIterator {
}
}
- private static void addIterator(DocIdSetIterator disi, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
+ private static void addIterator(
+ DocIdSetIterator disi,
+ List<DocIdSetIterator> allIterators,
+ List<TwoPhaseIterator> twoPhaseIterators) {
TwoPhaseIterator twoPhase = TwoPhaseIterator.unwrap(disi);
if (twoPhase != null) {
addTwoPhaseIterator(twoPhase, allIterators, twoPhaseIterators);
- } else if (disi.getClass() == ConjunctionDISI.class) { // Check for exactly this class for collapsing
+ } else if (disi.getClass()
+ == ConjunctionDISI.class) { // Check for exactly this class for collapsing
ConjunctionDISI conjunction = (ConjunctionDISI) disi;
- // subconjuctions have already split themselves into two phase iterators and others, so we can take those
+ // subconjunctions have already split themselves into two phase iterators and others, so we can
+ // take those
// iterators as they are and move them up to this conjunction
allIterators.add(conjunction.lead1);
allIterators.add(conjunction.lead2);
@@ -129,25 +144,35 @@ public final class ConjunctionDISI extends DocIdSetIterator {
}
}
- private static void addTwoPhaseIterator(TwoPhaseIterator twoPhaseIter, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
+ private static void addTwoPhaseIterator(
+ TwoPhaseIterator twoPhaseIter,
+ List<DocIdSetIterator> allIterators,
+ List<TwoPhaseIterator> twoPhaseIterators) {
addIterator(twoPhaseIter.approximation(), allIterators, twoPhaseIterators);
- if (twoPhaseIter.getClass() == ConjunctionTwoPhaseIterator.class) { // Check for exactly this class for collapsing
- Collections.addAll(twoPhaseIterators, ((ConjunctionTwoPhaseIterator) twoPhaseIter).twoPhaseIterators);
+ if (twoPhaseIter.getClass()
+ == ConjunctionTwoPhaseIterator.class) { // Check for exactly this class for collapsing
+ Collections.addAll(
+ twoPhaseIterators, ((ConjunctionTwoPhaseIterator) twoPhaseIter).twoPhaseIterators);
} else {
twoPhaseIterators.add(twoPhaseIter);
}
}
private static DocIdSetIterator createConjunction(
- List<DocIdSetIterator> allIterators,
- List<TwoPhaseIterator> twoPhaseIterators) {
+ List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
// check that all sub-iterators are on the same doc ID
- int curDoc = allIterators.size() > 0 ? allIterators.get(0).docID() : twoPhaseIterators.get(0).approximation.docID();
+ int curDoc =
+ allIterators.size() > 0
+ ? allIterators.get(0).docID()
+ : twoPhaseIterators.get(0).approximation.docID();
boolean iteratorsOnTheSameDoc = allIterators.stream().allMatch(it -> it.docID() == curDoc);
- iteratorsOnTheSameDoc = iteratorsOnTheSameDoc && twoPhaseIterators.stream().allMatch(it -> it.approximation().docID() == curDoc);
+ iteratorsOnTheSameDoc =
+ iteratorsOnTheSameDoc
+ && twoPhaseIterators.stream().allMatch(it -> it.approximation().docID() == curDoc);
if (iteratorsOnTheSameDoc == false) {
- throw new IllegalArgumentException("Sub-iterators of ConjunctionDISI are not on the same document!");
+ throw new IllegalArgumentException(
+ "Sub-iterators of ConjunctionDISI are not on the same document!");
}
long minCost = allIterators.stream().mapToLong(DocIdSetIterator::cost).min().getAsLong();
@@ -176,7 +201,9 @@ public final class ConjunctionDISI extends DocIdSetIterator {
}
if (twoPhaseIterators.isEmpty() == false) {
- disi = TwoPhaseIterator.asDocIdSetIterator(new ConjunctionTwoPhaseIterator(disi, twoPhaseIterators));
+ disi =
+ TwoPhaseIterator.asDocIdSetIterator(
+ new ConjunctionTwoPhaseIterator(disi, twoPhaseIterators));
}
return disi;
@@ -190,19 +217,22 @@ public final class ConjunctionDISI extends DocIdSetIterator {
// Sort the array the first time to allow the least frequent DocsEnum to
// lead the matching.
- CollectionUtil.timSort(iterators, new Comparator<DocIdSetIterator>() {
- @Override
- public int compare(DocIdSetIterator o1, DocIdSetIterator o2) {
- return Long.compare(o1.cost(), o2.cost());
- }
- });
+ CollectionUtil.timSort(
+ iterators,
+ new Comparator<DocIdSetIterator>() {
+ @Override
+ public int compare(DocIdSetIterator o1, DocIdSetIterator o2) {
+ return Long.compare(o1.cost(), o2.cost());
+ }
+ });
lead1 = iterators.get(0);
lead2 = iterators.get(1);
others = iterators.subList(2, iterators.size()).toArray(new DocIdSetIterator[0]);
}
private int doNext(int doc) throws IOException {
- advanceHead: for(;;) {
+ advanceHead:
+ for (; ; ) {
assert doc == lead1.docID();
// find agreement between the two iterators with the lower costs
@@ -238,7 +268,8 @@ public final class ConjunctionDISI extends DocIdSetIterator {
@Override
public int advance(int target) throws IOException {
- assert assertItersOnSameDoc() : "Sub-iterators of ConjunctionDISI are not one the same document!";
+ assert assertItersOnSameDoc()
+ : "Sub-iterators of ConjunctionDISI are not on the same document!";
return doNext(lead1.advance(target));
}
@@ -249,7 +280,8 @@ public final class ConjunctionDISI extends DocIdSetIterator {
@Override
public int nextDoc() throws IOException {
- assert assertItersOnSameDoc() : "Sub-iterators of ConjunctionDISI are not on the same document!";
+ assert assertItersOnSameDoc()
+ : "Sub-iterators of ConjunctionDISI are not on the same document!";
return doNext(lead1.nextDoc());
}
@@ -300,18 +332,21 @@ public final class ConjunctionDISI extends DocIdSetIterator {
@Override
public int nextDoc() throws IOException {
- assert assertItersOnSameDoc() : "Sub-iterators of ConjunctionDISI are not on the same document!";
+ assert assertItersOnSameDoc()
+ : "Sub-iterators of ConjunctionDISI are not on the same document!";
return doNext(lead.nextDoc());
}
@Override
public int advance(int target) throws IOException {
- assert assertItersOnSameDoc() : "Sub-iterators of ConjunctionDISI are not on the same document!";
+ assert assertItersOnSameDoc()
+ : "Sub-iterators of ConjunctionDISI are not on the same document!";
return doNext(lead.advance(target));
}
private int doNext(int doc) throws IOException {
- advanceLead: for (;; doc = lead.nextDoc()) {
+ advanceLead:
+ for (; ; doc = lead.nextDoc()) {
if (doc >= minLength) {
return NO_MORE_DOCS;
}
@@ -341,33 +376,34 @@ public final class ConjunctionDISI extends DocIdSetIterator {
}
return iteratorsOnTheSameDoc;
}
-
}
- /**
- * {@link TwoPhaseIterator} implementing a conjunction.
- */
+ /** {@link TwoPhaseIterator} implementing a conjunction. */
private static final class ConjunctionTwoPhaseIterator extends TwoPhaseIterator {
private final TwoPhaseIterator[] twoPhaseIterators;
private final float matchCost;
- private ConjunctionTwoPhaseIterator(DocIdSetIterator approximation,
- List<? extends TwoPhaseIterator> twoPhaseIterators) {
+ private ConjunctionTwoPhaseIterator(
+ DocIdSetIterator approximation, List<? extends TwoPhaseIterator> twoPhaseIterators) {
super(approximation);
assert twoPhaseIterators.size() > 0;
- CollectionUtil.timSort(twoPhaseIterators, new Comparator<TwoPhaseIterator>() {
- @Override
- public int compare(TwoPhaseIterator o1, TwoPhaseIterator o2) {
- return Float.compare(o1.matchCost(), o2.matchCost());
- }
- });
+ CollectionUtil.timSort(
+ twoPhaseIterators,
+ new Comparator<TwoPhaseIterator>() {
+ @Override
+ public int compare(TwoPhaseIterator o1, TwoPhaseIterator o2) {
+ return Float.compare(o1.matchCost(), o2.matchCost());
+ }
+ });
- this.twoPhaseIterators = twoPhaseIterators.toArray(new TwoPhaseIterator[twoPhaseIterators.size()]);
+ this.twoPhaseIterators =
+ twoPhaseIterators.toArray(new TwoPhaseIterator[twoPhaseIterators.size()]);
// Compute the matchCost as the total matchCost of the sub iterators.
- // TODO: This could be too high because the matching is done cheapest first: give the lower matchCosts a higher weight.
+ // TODO: This could be too high because the matching is done cheapest first: give the lower
+ // matchCosts a higher weight.
float totalMatchCost = 0;
for (TwoPhaseIterator tpi : twoPhaseIterators) {
totalMatchCost += tpi.matchCost();
@@ -389,7 +425,5 @@ public final class ConjunctionDISI extends DocIdSetIterator {
public float matchCost() {
return matchCost;
}
-
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 7ba4aa3..b0a1a9a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -28,8 +27,12 @@ class ConjunctionScorer extends Scorer {
final Scorer[] scorers;
final Collection<Scorer> required;
- /** Create a new {@link ConjunctionScorer}, note that {@code scorers} must be a subset of {@code required}. */
- ConjunctionScorer(Weight weight, Collection<Scorer> required, Collection<Scorer> scorers) throws IOException {
+ /**
+ * Create a new {@link ConjunctionScorer}, note that {@code scorers} must be a subset of {@code
+ * required}.
+ */
+ ConjunctionScorer(Weight weight, Collection<Scorer> required, Collection<Scorer> scorers)
+ throws IOException {
super(weight);
assert required.containsAll(scorers);
this.disi = ConjunctionDISI.intersectScorers(required);
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index 26d4fe2..015c821 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -16,24 +16,20 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Objects;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
/**
- * A query that wraps another query and simply returns a constant score equal to
- * 1 for every document that matches the query.
- * It therefore simply strips of all scores and always returns 1.
+ * A query that wraps another query and simply returns a constant score equal to 1 for every
+ * document that matches the query. It therefore simply strips off all scores and always returns 1.
*/
public final class ConstantScoreQuery extends Query {
private final Query query;
- /** Strips off scores from the passed in Query. The hits will get a constant score
- * of 1. */
+ /** Strips off scores from the passed in Query. The hits will get a constant score of 1. */
public ConstantScoreQuery(Query query) {
this.query = Objects.requireNonNull(query, "Query must not be null");
}
@@ -67,10 +63,10 @@ public final class ConstantScoreQuery extends Query {
query.visit(visitor.getSubVisitor(BooleanClause.Occur.FILTER, this));
}
- /** We return this as our {@link BulkScorer} so that if the CSQ
- * wraps a query with its own optimized top-level
- * scorer (e.g. BooleanScorer) we can use that
- * top-level scorer. */
+ /**
+ * We return this as our {@link BulkScorer} so that if the CSQ wraps a query with its own
+ * optimized top-level scorer (e.g. BooleanScorer) we can use that top-level scorer.
+ */
protected static class ConstantBulkScorer extends BulkScorer {
final BulkScorer bulkScorer;
final Weight weight;
@@ -83,7 +79,8 @@ public final class ConstantScoreQuery extends Query {
}
@Override
- public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
+ public int score(LeafCollector collector, Bits acceptDocs, int min, int max)
+ throws IOException {
return bulkScorer.score(wrapCollector(collector), acceptDocs, min, max);
}
@@ -92,12 +89,13 @@ public final class ConstantScoreQuery extends Query {
@Override
public void setScorer(Scorable scorer) throws IOException {
// we must wrap again here, but using the scorer passed in as parameter:
- in.setScorer(new FilterScorable(scorer) {
- @Override
- public float score() {
- return theScore;
- }
- });
+ in.setScorer(
+ new FilterScorable(scorer) {
+ @Override
+ public float score() {
+ return theScore;
+ }
+ });
}
};
}
@@ -109,7 +107,8 @@ public final class ConstantScoreQuery extends Query {
}
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
final Weight innerWeight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f);
if (scoreMode.needsScores()) {
return new ConstantScoreWeight(this, boost) {
@@ -137,7 +136,8 @@ public final class ConstantScoreQuery extends Query {
final Scorer innerScorer = innerScorerSupplier.get(leadCost);
final TwoPhaseIterator twoPhaseIterator = innerScorer.twoPhaseIterator();
if (twoPhaseIterator == null) {
- return new ConstantScoreScorer(innerWeight, score(), scoreMode, innerScorer.iterator());
+ return new ConstantScoreScorer(
+ innerWeight, score(), scoreMode, innerScorer.iterator());
} else {
return new ConstantScoreScorer(innerWeight, score(), scoreMode, twoPhaseIterator);
}
@@ -168,7 +168,6 @@ public final class ConstantScoreQuery extends Query {
public boolean isCacheable(LeafReaderContext ctx) {
return innerWeight.isCacheable(ctx);
}
-
};
} else {
return innerWeight;
@@ -177,16 +176,12 @@ public final class ConstantScoreQuery extends Query {
@Override
public String toString(String field) {
- return new StringBuilder("ConstantScore(")
- .append(query.toString(field))
- .append(')')
- .toString();
+ return new StringBuilder("ConstantScore(").append(query.toString(field)).append(')').toString();
}
@Override
public boolean equals(Object other) {
- return sameClassAs(other) &&
- query.equals(((ConstantScoreQuery) other).query);
+ return sameClassAs(other) && query.equals(((ConstantScoreQuery) other).query);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreScorer.java
index 17c8362..759300e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreScorer.java
@@ -16,11 +16,11 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
* A constant-scoring {@link Scorer}.
+ *
* @lucene.internal
*/
public final class ConstantScoreScorer extends Scorer {
@@ -60,44 +60,54 @@ public final class ConstantScoreScorer extends Scorer {
private final TwoPhaseIterator twoPhaseIterator;
private final DocIdSetIterator disi;
- /** Constructor based on a {@link DocIdSetIterator} which will be used to
- * drive iteration. Two phase iteration will not be supported.
- * @param weight the parent weight
- * @param score the score to return on each document
- * @param scoreMode the score mode
- * @param disi the iterator that defines matching documents */
- public ConstantScoreScorer(Weight weight, float score, ScoreMode scoreMode, DocIdSetIterator disi) {
+ /**
+ * Constructor based on a {@link DocIdSetIterator} which will be used to drive iteration. Two
+ * phase iteration will not be supported.
+ *
+ * @param weight the parent weight
+ * @param score the score to return on each document
+ * @param scoreMode the score mode
+ * @param disi the iterator that defines matching documents
+ */
+ public ConstantScoreScorer(
+ Weight weight, float score, ScoreMode scoreMode, DocIdSetIterator disi) {
super(weight);
this.score = score;
this.scoreMode = scoreMode;
- this.approximation = scoreMode == ScoreMode.TOP_SCORES ? new DocIdSetIteratorWrapper(disi) : disi;
+ this.approximation =
+ scoreMode == ScoreMode.TOP_SCORES ? new DocIdSetIteratorWrapper(disi) : disi;
this.twoPhaseIterator = null;
this.disi = this.approximation;
}
- /** Constructor based on a {@link TwoPhaseIterator}. In that case the
- * {@link Scorer} will support two-phase iteration.
- * @param weight the parent weight
- * @param score the score to return on each document
- * @param scoreMode the score mode
- * @param twoPhaseIterator the iterator that defines matching documents */
- public ConstantScoreScorer(Weight weight, float score, ScoreMode scoreMode, TwoPhaseIterator twoPhaseIterator) {
+ /**
+ * Constructor based on a {@link TwoPhaseIterator}. In that case the {@link Scorer} will support
+ * two-phase iteration.
+ *
+ * @param weight the parent weight
+ * @param score the score to return on each document
+ * @param scoreMode the score mode
+ * @param twoPhaseIterator the iterator that defines matching documents
+ */
+ public ConstantScoreScorer(
+ Weight weight, float score, ScoreMode scoreMode, TwoPhaseIterator twoPhaseIterator) {
super(weight);
this.score = score;
this.scoreMode = scoreMode;
if (scoreMode == ScoreMode.TOP_SCORES) {
this.approximation = new DocIdSetIteratorWrapper(twoPhaseIterator.approximation());
- this.twoPhaseIterator = new TwoPhaseIterator(this.approximation) {
- @Override
- public boolean matches() throws IOException {
- return twoPhaseIterator.matches();
- }
-
- @Override
- public float matchCost() {
- return twoPhaseIterator.matchCost();
- }
- };
+ this.twoPhaseIterator =
+ new TwoPhaseIterator(this.approximation) {
+ @Override
+ public boolean matches() throws IOException {
+ return twoPhaseIterator.matches();
+ }
+
+ @Override
+ public float matchCost() {
+ return twoPhaseIterator.matchCost();
+ }
+ };
} else {
this.approximation = twoPhaseIterator.approximation();
this.twoPhaseIterator = twoPhaseIterator;
@@ -136,5 +146,4 @@ public final class ConstantScoreScorer extends Scorer {
public float score() throws IOException {
return score;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreWeight.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreWeight.java
index 7ee4cce..4aa8d31 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreWeight.java
@@ -16,15 +16,13 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.index.LeafReaderContext;
/**
- * A Weight that has a constant score equal to the boost of the wrapped query.
- * This is typically useful when building queries which do not produce
- * meaningful scores and are mostly useful for filtering.
+ * A Weight that has a constant score equal to the boost of the wrapped query. This is typically
+ * useful when building queries which do not produce meaningful scores and are mostly useful for
+ * filtering.
*
* @lucene.internal
*/
@@ -58,11 +56,9 @@ public abstract class ConstantScoreWeight extends Weight {
}
if (exists) {
- return Explanation.match(
- score, getQuery().toString() + (score == 1f ? "" : "^" + score));
+ return Explanation.match(score, getQuery().toString() + (score == 1f ? "" : "^" + score));
} else {
return Explanation.noMatch(getQuery().toString() + " doesn't match id " + doc);
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ControlledRealTimeReopenThread.java b/lucene/core/src/java/org/apache/lucene/search/ControlledRealTimeReopenThread.java
index ccd954d..611317a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ControlledRealTimeReopenThread.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ControlledRealTimeReopenThread.java
@@ -16,25 +16,22 @@
*/
package org.apache.lucene.search;
-
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.ThreadInterruptedException;
-/** Utility class that runs a thread to manage periodicc
- * reopens of a {@link ReferenceManager}, with methods to wait for a specific
- * index changes to become visible. When a given search request needs to see a specific
- * index change, call the {#waitForGeneration} to wait for
- * that change to be visible. Note that this will only
- * scale well if most searches do not need to wait for a
- * specific index generation.
+/**
+ * Utility class that runs a thread to manage periodic reopens of a {@link ReferenceManager}, with
+ * methods to wait for a specific index changes to become visible. When a given search request needs
+ * to see a specific index change, call {@link #waitForGeneration} to wait for that change to be
+ * visible. Note that this will only scale well if most searches do not need to wait for a specific
+ * index generation.
*
- * @lucene.experimental */
-
+ * @lucene.experimental
+ */
public class ControlledRealTimeReopenThread<T> extends Thread implements Closeable {
private final ReferenceManager<T> manager;
private final long targetMaxStaleNS;
@@ -47,31 +44,34 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
private final ReentrantLock reopenLock = new ReentrantLock();
private final Condition reopenCond = reopenLock.newCondition();
-
+
/**
- * Create ControlledRealTimeReopenThread, to periodically
- * reopen the {@link ReferenceManager}.
+ * Create ControlledRealTimeReopenThread, to periodically reopen the {@link ReferenceManager}.
*
- * @param targetMaxStaleSec Maximum time until a new
- * reader must be opened; this sets the upper bound
- * on how slowly reopens may occur, when no
- * caller is waiting for a specific generation to
- * become visible.
- *
- * @param targetMinStaleSec Mininum time until a new
- * reader can be opened; this sets the lower bound
- * on how quickly reopens may occur, when a caller
- * is waiting for a specific generation to
- * become visible.
+ * @param targetMaxStaleSec Maximum time until a new reader must be opened; this sets the upper
+ * bound on how slowly reopens may occur, when no caller is waiting for a specific generation
+ * to become visible.
+ * @param targetMinStaleSec Minimum time until a new reader can be opened; this sets the lower
+ * bound on how quickly reopens may occur, when a caller is waiting for a specific generation
+ * to become visible.
*/
- public ControlledRealTimeReopenThread(IndexWriter writer, ReferenceManager<T> manager, double targetMaxStaleSec, double targetMinStaleSec) {
+ public ControlledRealTimeReopenThread(
+ IndexWriter writer,
+ ReferenceManager<T> manager,
+ double targetMaxStaleSec,
+ double targetMinStaleSec) {
if (targetMaxStaleSec < targetMinStaleSec) {
- throw new IllegalArgumentException("targetMaxScaleSec (= " + targetMaxStaleSec + ") < targetMinStaleSec (=" + targetMinStaleSec + ")");
+ throw new IllegalArgumentException(
+ "targetMaxScaleSec (= "
+ + targetMaxStaleSec
+ + ") < targetMinStaleSec (="
+ + targetMinStaleSec
+ + ")");
}
this.writer = writer;
this.manager = manager;
- this.targetMaxStaleNS = (long) (1000000000*targetMaxStaleSec);
- this.targetMinStaleNS = (long) (1000000000*targetMinStaleSec);
+ this.targetMaxStaleNS = (long) (1000000000 * targetMaxStaleSec);
+ this.targetMinStaleNS = (long) (1000000000 * targetMinStaleSec);
manager.addListener(new HandleRefresh());
}
@@ -95,7 +95,7 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
@Override
public synchronized void close() {
- //System.out.println("NRT: set finish");
+ // System.out.println("NRT: set finish");
finish = true;
@@ -119,13 +119,11 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
}
/**
- * Waits for the target generation to become visible in
- * the searcher.
- * If the current searcher is older than the
- * target generation, this method will block
- * until the searcher is reopened, by another via
- * {@link ReferenceManager#maybeRefresh} or until the {@link ReferenceManager} is closed.
- *
+ * Waits for the target generation to become visible in the searcher. If the current searcher is
+ * older than the target generation, this method will block until the searcher is reopened, by
+ * another via {@link ReferenceManager#maybeRefresh} or until the {@link ReferenceManager} is
+ * closed.
+ *
* @param targetGen the generation to wait for
*/
public void waitForGeneration(long targetGen) throws InterruptedException {
@@ -133,25 +131,21 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
}
/**
- * Waits for the target generation to become visible in
- * the searcher, up to a maximum specified milli-seconds.
- * If the current searcher is older than the target
- * generation, this method will block until the
- * searcher has been reopened by another thread via
- * {@link ReferenceManager#maybeRefresh}, the given waiting time has elapsed, or until
- * the {@link ReferenceManager} is closed.
- * <p>
- * NOTE: if the waiting time elapses before the requested target generation is
- * available the current {@link SearcherManager} is returned instead.
- *
- * @param targetGen
- * the generation to wait for
- * @param maxMS
- * maximum milliseconds to wait, or -1 to wait indefinitely
- * @return true if the targetGeneration is now available,
- * or false if maxMS wait time was exceeded
+ * Waits for the target generation to become visible in the searcher, up to a maximum specified
+ * milli-seconds. If the current searcher is older than the target generation, this method will
+ * block until the searcher has been reopened by another thread via {@link
+ * ReferenceManager#maybeRefresh}, the given waiting time has elapsed, or until the {@link
+ * ReferenceManager} is closed.
+ *
+ * <p>NOTE: if the waiting time elapses before the requested target generation is available the
+ * current {@link SearcherManager} is returned instead.
+ *
+ * @param targetGen the generation to wait for
+ * @param maxMS maximum milliseconds to wait, or -1 to wait indefinitely
+ * @return true if the targetGeneration is now available, or false if maxMS wait time was exceeded
*/
- public synchronized boolean waitForGeneration(long targetGen, int maxMS) throws InterruptedException {
+ public synchronized boolean waitForGeneration(long targetGen, int maxMS)
+ throws InterruptedException {
if (targetGen > searchingGen) {
// Notify the reopen thread that the waitingGen has
// changed, so it may wake up and realize it should
@@ -168,13 +162,13 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
reopenLock.unlock();
}
- long startMS = System.nanoTime()/1000000;
+ long startMS = System.nanoTime() / 1000000;
while (targetGen > searchingGen) {
if (maxMS < 0) {
wait();
} else {
- long msLeft = (startMS + maxMS) - System.nanoTime()/1000000;
+ long msLeft = (startMS + maxMS) - System.nanoTime() / 1000000;
if (msLeft <= 0) {
return false;
} else {
@@ -193,7 +187,7 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
// case clock shift messes up nanoTime?
long lastReopenStartNS = System.nanoTime();
- //System.out.println("reopen: start");
+ // System.out.println("reopen: start");
while (!finish) {
// TODO: try to guestimate how long reopen might
@@ -208,7 +202,8 @@ public class ControlledRealTimeReopenThread<T> extends Thread implements Closeab
try {
// True if we have someone waiting for reopened searcher:
boolean hasWaiting = waitingGen > searchingGen;
- final long nextReopenStartNS = lastReopenStartNS + (hasWaiting ? targetMinStaleNS : targetMaxStaleNS);
+ final long nextReopenStartNS =
+ lastReopenStartNS + (hasWaiting ? targetMinStaleNS : targetMaxStaleNS);
final long sleepNS = nextReopenStartNS - System.nanoTime();
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisiPriorityQueue.java b/lucene/core/src/java/org/apache/lucene/search/DisiPriorityQueue.java
index 0692a7b..5981b03 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisiPriorityQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisiPriorityQueue.java
@@ -16,16 +16,15 @@
*/
package org.apache.lucene.search;
-
import java.util.Arrays;
import java.util.Iterator;
-
import org.apache.lucene.util.PriorityQueue;
/**
- * A priority queue of DocIdSetIterators that orders by current doc ID.
- * This specialization is needed over {@link PriorityQueue} because the
- * pluggable comparison function makes the rebalancing quite slow.
+ * A priority queue of DocIdSetIterators that orders by current doc ID. This specialization is
+ * needed over {@link PriorityQueue} because the pluggable comparison function makes the rebalancing
+ * quite slow.
+ *
* @lucene.internal
*/
public final class DisiPriorityQueue implements Iterable<DisiWrapper> {
@@ -79,8 +78,7 @@ public final class DisiPriorityQueue implements Iterable<DisiWrapper> {
return w1;
}
- private DisiWrapper topList(DisiWrapper list, DisiWrapper[] heap,
- int size, int i) {
+ private DisiWrapper topList(DisiWrapper list, DisiWrapper[] heap, int size, int i) {
final DisiWrapper w = heap[i];
if (w.doc == list.doc) {
list = prepend(w, list);
@@ -165,7 +163,4 @@ public final class DisiPriorityQueue implements Iterable<DisiWrapper> {
public Iterator<DisiWrapper> iterator() {
return Arrays.asList(heap).subList(0, size).iterator();
}
-
}
-
-
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisiWrapper.java b/lucene/core/src/java/org/apache/lucene/search/DisiWrapper.java
index fac94180..6adb650 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisiWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisiWrapper.java
@@ -16,11 +16,11 @@
*/
package org.apache.lucene.search;
-
import org.apache.lucene.search.spans.Spans;
/**
* Wrapper used in {@link DisiPriorityQueue}.
+ *
* @lucene.internal
*/
public class DisiWrapper {
@@ -53,7 +53,7 @@ public class DisiWrapper {
this.cost = iterator.cost();
this.doc = -1;
this.twoPhaseView = scorer.twoPhaseIterator();
-
+
if (twoPhaseView != null) {
approximation = twoPhaseView.approximation();
matchCost = twoPhaseView.matchCost();
@@ -70,7 +70,7 @@ public class DisiWrapper {
this.cost = iterator.cost();
this.doc = -1;
this.twoPhaseView = spans.asTwoPhaseIterator();
-
+
if (twoPhaseView != null) {
approximation = twoPhaseView.approximation();
matchCost = twoPhaseView.matchCost();
@@ -82,4 +82,3 @@ public class DisiWrapper {
this.lastApproxMatchDoc = -2;
}
}
-
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionDISIApproximation.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionDISIApproximation.java
index 395c152..805eadf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionDISIApproximation.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionDISIApproximation.java
@@ -19,8 +19,9 @@ package org.apache.lucene.search;
import java.io.IOException;
/**
- * A {@link DocIdSetIterator} which is a disjunction of the approximations of
- * the provided iterators.
+ * A {@link DocIdSetIterator} which is a disjunction of the approximations of the provided
+ * iterators.
+ *
* @lucene.internal
*/
public class DisjunctionDISIApproximation extends DocIdSetIterator {
@@ -44,7 +45,7 @@ public class DisjunctionDISIApproximation extends DocIdSetIterator {
@Override
public int docID() {
- return subIterators.top().doc;
+ return subIterators.top().doc;
}
@Override
@@ -70,5 +71,3 @@ public class DisjunctionDISIApproximation extends DocIdSetIterator {
return top.doc;
}
}
-
-
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMatchesIterator.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMatchesIterator.java
index 9adbaf4..925d130 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMatchesIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMatchesIterator.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
-
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
@@ -34,22 +33,29 @@ import org.apache.lucene.util.PriorityQueue;
/**
* A {@link MatchesIterator} that combines matches from a set of sub-iterators
*
- * Matches are sorted by their start positions, and then by their end positions, so that
- * prefixes sort first. Matches may overlap, or be duplicated if they appear in more
- * than one of the sub-iterators.
+ * <p>Matches are sorted by their start positions, and then by their end positions, so that prefixes
+ * sort first. Matches may overlap, or be duplicated if they appear in more than one of the
+ * sub-iterators.
*/
final class DisjunctionMatchesIterator implements MatchesIterator {
/**
* Create a {@link DisjunctionMatchesIterator} over a list of terms
*
- * Only terms that have at least one match in the given document will be included
+ * <p>Only terms that have at least one match in the given document will be included
*/
- static MatchesIterator fromTerms(LeafReaderContext context, int doc, Query query, String field, List<Term> terms) throws IOException {
+ static MatchesIterator fromTerms(
+ LeafReaderContext context, int doc, Query query, String field, List<Term> terms)
+ throws IOException {
Objects.requireNonNull(field);
for (Term term : terms) {
if (Objects.equals(field, term.field()) == false) {
- throw new IllegalArgumentException("Tried to generate iterator from terms in multiple fields: expected [" + field + "] but got [" + term.field() + "]");
+ throw new IllegalArgumentException(
+ "Tried to generate iterator from terms in multiple fields: expected ["
+ + field
+ + "] but got ["
+ + term.field()
+ + "]");
}
}
return fromTermsEnum(context, doc, query, field, asBytesRefIterator(terms));
@@ -58,34 +64,36 @@ final class DisjunctionMatchesIterator implements MatchesIterator {
private static BytesRefIterator asBytesRefIterator(List<Term> terms) {
return new BytesRefIterator() {
int i = 0;
+
@Override
public BytesRef next() {
- if (i >= terms.size())
- return null;
+ if (i >= terms.size()) return null;
return terms.get(i++).bytes();
}
};
}
/**
- * Create a {@link DisjunctionMatchesIterator} over a list of terms extracted from a {@link BytesRefIterator}
+ * Create a {@link DisjunctionMatchesIterator} over a list of terms extracted from a {@link
+ * BytesRefIterator}
*
- * Only terms that have at least one match in the given document will be included
+ * <p>Only terms that have at least one match in the given document will be included
*/
- static MatchesIterator fromTermsEnum(LeafReaderContext context, int doc, Query query, String field, BytesRefIterator terms) throws IOException {
+ static MatchesIterator fromTermsEnum(
+ LeafReaderContext context, int doc, Query query, String field, BytesRefIterator terms)
+ throws IOException {
Objects.requireNonNull(field);
Terms t = context.reader().terms(field);
- if (t == null)
- return null;
+ if (t == null) return null;
TermsEnum te = t.iterator();
PostingsEnum reuse = null;
for (BytesRef term = terms.next(); term != null; term = terms.next()) {
if (te.seekExact(term)) {
PostingsEnum pe = te.postings(reuse, PostingsEnum.OFFSETS);
if (pe.advance(doc) == doc) {
- return new TermsEnumDisjunctionMatchesIterator(new TermMatchesIterator(query, pe), terms, te, doc, query);
- }
- else {
+ return new TermsEnumDisjunctionMatchesIterator(
+ new TermMatchesIterator(query, pe), terms, te, doc, query);
+ } else {
reuse = pe;
}
}
@@ -105,7 +113,8 @@ final class DisjunctionMatchesIterator implements MatchesIterator {
private MatchesIterator it = null;
- TermsEnumDisjunctionMatchesIterator(MatchesIterator first, BytesRefIterator terms, TermsEnum te, int doc, Query query) {
+ TermsEnumDisjunctionMatchesIterator(
+ MatchesIterator first, BytesRefIterator terms, TermsEnum te, int doc, Query query) {
this.first = first;
this.terms = terms;
this.te = te;
@@ -172,10 +181,8 @@ final class DisjunctionMatchesIterator implements MatchesIterator {
}
static MatchesIterator fromSubIterators(List<MatchesIterator> mis) throws IOException {
- if (mis.size() == 0)
- return null;
- if (mis.size() == 1)
- return mis.get(0);
+ if (mis.size() == 0) return null;
+ if (mis.size() == 1) return mis.get(0);
return new DisjunctionMatchesIterator(mis);
}
@@ -184,14 +191,15 @@ final class DisjunctionMatchesIterator implements MatchesIterator {
private boolean started = false;
private DisjunctionMatchesIterator(List<MatchesIterator> matches) throws IOException {
- queue = new PriorityQueue<MatchesIterator>(matches.size()){
- @Override
- protected boolean lessThan(MatchesIterator a, MatchesIterator b) {
- return a.startPosition() < b.startPosition() ||
- (a.startPosition() == b.startPosition() && a.endPosition() < b.endPosition()) ||
- (a.startPosition() == b.startPosition() && a.endPosition() == b.endPosition());
- }
- };
+ queue =
+ new PriorityQueue<MatchesIterator>(matches.size()) {
+ @Override
+ protected boolean lessThan(MatchesIterator a, MatchesIterator b) {
+ return a.startPosition() < b.startPosition()
+ || (a.startPosition() == b.startPosition() && a.endPosition() < b.endPosition())
+ || (a.startPosition() == b.startPosition() && a.endPosition() == b.endPosition());
+ }
+ };
for (MatchesIterator mi : matches) {
if (mi.next()) {
queue.add(mi);
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index d934847..97ca0fd 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -24,23 +24,23 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
/**
- * A query that generates the union of documents produced by its subqueries, and that scores each document with the maximum
- * score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
- * This is useful when searching for a word in multiple fields with different boost factors (so that the fields cannot be
- * combined equivalently into a single search field). We want the primary score to be the one associated with the highest boost,
- * not the sum of the field scores (as BooleanQuery would give).
- * If the query is "albino elephant" this ensures that "albino" matching one field and "elephant" matching
- * another gets a higher score than "albino" matching both fields.
- * To get this result, use both BooleanQuery and DisjunctionMaxQuery: for each term a DisjunctionMaxQuery searches for it in
- * each field, while the set of these DisjunctionMaxQuery's is combined into a BooleanQuery.
- * The tie breaker capability allows results that include the same term in multiple fields to be judged better than results that
- * include this term in only the best of those multiple fields, without confusing this with the better case of two different terms
- * in the multiple fields.
+ * A query that generates the union of documents produced by its subqueries, and that scores each
+ * document with the maximum score for that document as produced by any subquery, plus a tie
+ * breaking increment for any additional matching subqueries. This is useful when searching for a
+ * word in multiple fields with different boost factors (so that the fields cannot be combined
+ * equivalently into a single search field). We want the primary score to be the one associated with
+ * the highest boost, not the sum of the field scores (as BooleanQuery would give). If the query is
+ * "albino elephant" this ensures that "albino" matching one field and "elephant" matching another
+ * gets a higher score than "albino" matching both fields. To get this result, use both BooleanQuery
+ * and DisjunctionMaxQuery: for each term a DisjunctionMaxQuery searches for it in each field, while
+ * the set of these DisjunctionMaxQuery's is combined into a BooleanQuery. The tie breaker
+ * capability allows results that include the same term in multiple fields to be judged better than
+ * results that include this term in only the best of those multiple fields, without confusing this
+ * with the better case of two different terms in the multiple fields.
*/
public final class DisjunctionMaxQuery extends Query implements Iterable<Query> {
@@ -52,11 +52,13 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
/**
* Creates a new DisjunctionMaxQuery
+ *
* @param disjuncts a {@code Collection<Query>} of all the disjuncts to add
- * @param tieBreakerMultiplier the score of each non-maximum disjunct for a document is multiplied by this weight
- * and added into the final score. If non-zero, the value should be small, on the order of 0.1, which says that
- * 10 occurrences of word in a lower-scored field that is also in a higher scored field is just as good as a unique
- * word in the lower scored field (i.e., one that is not in any higher scored field.
+ * @param tieBreakerMultiplier the score of each non-maximum disjunct for a document is multiplied
+ * by this weight and added into the final score. If non-zero, the value should be small, on
+ * the order of 0.1, which says that 10 occurrences of word in a lower-scored field that is
+ * also in a higher scored field is just as good as a unique word in the lower scored field
+ * (i.e., one that is not in any higher scored field.
*/
public DisjunctionMaxQuery(Collection<Query> disjuncts, float tieBreakerMultiplier) {
Objects.requireNonNull(disjuncts, "Collection of Querys must not be null");
@@ -72,36 +74,36 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
public Iterator<Query> iterator() {
return getDisjuncts().iterator();
}
-
- /**
- * @return the disjuncts.
- */
+
+ /** @return the disjuncts. */
public List<Query> getDisjuncts() {
return Collections.unmodifiableList(Arrays.asList(disjuncts));
}
- /**
- * @return tie breaker value for multiple matches.
- */
+ /** @return tie breaker value for multiple matches. */
public float getTieBreakerMultiplier() {
return tieBreakerMultiplier;
}
/**
- * Expert: the Weight for DisjunctionMaxQuery, used to
- * normalize, score and explain these queries.
+ * Expert: the Weight for DisjunctionMaxQuery, used to normalize, score and explain these queries.
*
- * <p>NOTE: this API and implementation is subject to
- * change suddenly in the next release.</p>
+ * <p>NOTE: this API and implementation is subject to change suddenly in the next release.
*/
protected class DisjunctionMaxWeight extends Weight {
/** The Weights for our subqueries, in 1-1 correspondence with disjuncts */
- protected final ArrayList<Weight> weights = new ArrayList<>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
+ protected final ArrayList<Weight> weights =
+ new ArrayList<>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
+
private final ScoreMode scoreMode;
- /** Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
- public DisjunctionMaxWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ /**
+ * Construct the Weight for this Query searched by searcher. Recursively construct subquery
+ * weights.
+ */
+ public DisjunctionMaxWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
super(DisjunctionMaxQuery.this);
for (Query disjunctQuery : disjuncts) {
weights.add(searcher.createWeight(disjunctQuery, scoreMode, boost));
@@ -152,8 +154,7 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
return false;
}
for (Weight w : weights) {
- if (w.isCacheable(ctx) == false)
- return false;
+ if (w.isCacheable(ctx) == false) return false;
}
return true;
}
@@ -181,24 +182,30 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
}
if (match) {
final float score = (float) (max + otherSum * tieBreakerMultiplier);
- final String desc = tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:";
+ final String desc =
+ tieBreakerMultiplier == 0.0f
+ ? "max of:"
+ : "max plus " + tieBreakerMultiplier + " times others of:";
return Explanation.match(score, desc, subs);
} else {
return Explanation.noMatch("No matching clause");
}
}
-
- } // end of DisjunctionMaxWeight inner class
+ } // end of DisjunctionMaxWeight inner class
/** Create the Weight used to score us */
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
return new DisjunctionMaxWeight(searcher, scoreMode, boost);
}
- /** Optimize our representation and our subqueries representations
+ /**
+ * Optimize our representation and our subqueries representations
+ *
* @param reader the IndexReader we query
- * @return an optimized copy of us (which may not be a copy if there is nothing to optimize) */
+ * @return an optimized copy of us (which may not be a copy if there is nothing to optimize)
+ */
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (disjuncts.length == 1) {
@@ -236,23 +243,25 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
}
}
- /** Prettyprint us.
+ /**
+ * Prettyprint us.
+ *
* @param field the field to which we are applied
- * @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
+ * @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... |
+ * disjunctn)^boost"
*/
@Override
public String toString(String field) {
StringBuilder buffer = new StringBuilder();
buffer.append("(");
- for (int i = 0 ; i < disjuncts.length; i++) {
+ for (int i = 0; i < disjuncts.length; i++) {
Query subquery = disjuncts[i];
- if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens
+ if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens
buffer.append("(");
buffer.append(subquery.toString(field));
buffer.append(")");
- }
- else buffer.append(subquery.toString(field));
- if (i != disjuncts.length-1) buffer.append(" | ");
+ } else buffer.append(subquery.toString(field));
+ if (i != disjuncts.length - 1) buffer.append(" | ");
}
buffer.append(")");
if (tieBreakerMultiplier != 0.0f) {
@@ -262,22 +271,26 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
return buffer.toString();
}
- /** Return true if we represent the same query as other
+ /**
+ * Return true if we represent the same query as other
+ *
* @param other another object
- * @return true if other is a DisjunctionMaxQuery with the same boost and the same subqueries, in the same order, as us
+ * @return true if other is a DisjunctionMaxQuery with the same boost and the same subqueries, in
+ * the same order, as us
*/
@Override
public boolean equals(Object other) {
- return sameClassAs(other) &&
- equalsTo(getClass().cast(other));
+ return sameClassAs(other) && equalsTo(getClass().cast(other));
}
-
+
private boolean equalsTo(DisjunctionMaxQuery other) {
- return tieBreakerMultiplier == other.tieBreakerMultiplier &&
- Arrays.equals(disjuncts, other.disjuncts);
+ return tieBreakerMultiplier == other.tieBreakerMultiplier
+ && Arrays.equals(disjuncts, other.disjuncts);
}
- /** Compute a hash code for hashing us
+ /**
+ * Compute a hash code for hashing us
+ *
* @return the hash code
*/
@Override
@@ -287,6 +300,4 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
h = 31 * h + Arrays.hashCode(disjuncts);
return h;
}
-
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index 63d0285..affe771 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -18,14 +18,13 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.List;
-
import org.apache.lucene.util.MathUtil;
/**
- * The Scorer for DisjunctionMaxQuery. The union of all documents generated by the subquery scorers
- * is generated in document number order. The score for each document is the maximum of the scores computed
- * by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
- * for the other subqueries that generate the document.
+ * The Scorer for DisjunctionMaxQuery. The union of all documents generated by the subquery scorers
+ * is generated in document number order. The score for each document is the maximum of the scores
+ * computed by the subquery scorers that generate that document, plus tieBreakerMultiplier times the
+ * sum of the scores for the other subqueries that generate the document.
*/
final class DisjunctionMaxScorer extends DisjunctionScorer {
private final List<Scorer> subScorers;
@@ -36,16 +35,15 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
/**
* Creates a new instance of DisjunctionMaxScorer
- *
- * @param weight
- * The Weight to be used.
- * @param tieBreakerMultiplier
- * Multiplier applied to non-maximum-scoring subqueries for a
- * document as they are summed into the result.
- * @param subScorers
- * The sub scorers this Scorer should iterate on
+ *
+ * @param weight The Weight to be used.
+ * @param tieBreakerMultiplier Multiplier applied to non-maximum-scoring subqueries for a document
+ * as they are summed into the result.
+ * @param subScorers The sub scorers this Scorer should iterate on
*/
- DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, List<Scorer> subScorers, ScoreMode scoreMode) throws IOException {
+ DisjunctionMaxScorer(
+ Weight weight, float tieBreakerMultiplier, List<Scorer> subScorers, ScoreMode scoreMode)
+ throws IOException {
super(weight, subScorers, scoreMode);
this.subScorers = subScorers;
this.tieBreakerMultiplier = tieBreakerMultiplier;
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScoreBlockBoundaryPropagator.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScoreBlockBoundaryPropagator.java
index a8fd206..20d4dcf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScoreBlockBoundaryPropagator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScoreBlockBoundaryPropagator.java
@@ -22,24 +22,25 @@ import java.util.Collection;
import java.util.Comparator;
/**
- * A helper to propagate block boundaries for disjunctions.
- * Because a disjunction matches if any of its sub clauses matches, it is
- * tempting to return the minimum block boundary across all clauses. The problem
- * is that it might then make the query slow when the minimum competitive score
- * is high and low-scoring clauses don't drive iteration anymore. So this class
- * computes block boundaries only across clauses whose maximum score is greater
- * than or equal to the minimum competitive score, or the maximum scoring clause
- * if there is no such clause.
+ * A helper to propagate block boundaries for disjunctions. Because a disjunction matches if any of
+ * its sub clauses matches, it is tempting to return the minimum block boundary across all clauses.
+ * The problem is that it might then make the query slow when the minimum competitive score is high
+ * and low-scoring clauses don't drive iteration anymore. So this class computes block boundaries
+ * only across clauses whose maximum score is greater than or equal to the minimum competitive
+ * score, or the maximum scoring clause if there is no such clause.
*/
final class DisjunctionScoreBlockBoundaryPropagator {
- private static final Comparator<Scorer> MAX_SCORE_COMPARATOR = Comparator.comparing((Scorer s) -> {
- try {
- return s.getMaxScore(DocIdSetIterator.NO_MORE_DOCS);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }).thenComparing(Comparator.comparing(s -> s.iterator().cost()));
+ private static final Comparator<Scorer> MAX_SCORE_COMPARATOR =
+ Comparator.comparing(
+ (Scorer s) -> {
+ try {
+ return s.getMaxScore(DocIdSetIterator.NO_MORE_DOCS);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ })
+ .thenComparing(Comparator.comparing(s -> s.iterator().cost()));
private final Scorer[] scorers;
private final float[] maxScores;
@@ -58,9 +59,7 @@ final class DisjunctionScoreBlockBoundaryPropagator {
}
}
- /**
- * See {@link Scorer#advanceShallow(int)}.
- */
+ /** See {@link Scorer#advanceShallow(int)}. */
int advanceShallow(int target) throws IOException {
// For scorers that are below the lead index, just propagate.
for (int i = 0; i < leadIndex; ++i) {
@@ -98,8 +97,8 @@ final class DisjunctionScoreBlockBoundaryPropagator {
}
/**
- * Set the minimum competitive score to filter out clauses that score less
- * than this threshold.
+ * Set the minimum competitive score to filter out clauses that score less than this threshold.
+ *
* @see Scorer#setMinCompetitiveScore
*/
void setMinCompetitiveScore(float minScore) throws IOException {
@@ -108,5 +107,4 @@ final class DisjunctionScoreBlockBoundaryPropagator {
leadIndex++;
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index b1f2fd2..e7c7542 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -16,17 +16,13 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-
import org.apache.lucene.util.PriorityQueue;
-/**
- * Base class for Scorers that score disjunctions.
- */
+/** Base class for Scorers that score disjunctions. */
abstract class DisjunctionScorer extends Scorer {
private final boolean needsScores;
@@ -36,7 +32,8 @@ abstract class DisjunctionScorer extends Scorer {
private final BlockMaxDISI blockMaxApprox;
private final TwoPhase twoPhase;
- protected DisjunctionScorer(Weight weight, List<Scorer> subScorers, ScoreMode scoreMode) throws IOException {
+ protected DisjunctionScorer(Weight weight, List<Scorer> subScorers, ScoreMode scoreMode)
+ throws IOException {
super(weight);
if (subScorers.size() <= 1) {
throw new IllegalArgumentException("There must be at least 2 subScorers");
@@ -51,7 +48,8 @@ abstract class DisjunctionScorer extends Scorer {
for (Scorer scorer : subScorers) {
scorer.advanceShallow(0);
}
- this.blockMaxApprox = new BlockMaxDISI(new DisjunctionDISIApproximation(this.subScorers), this);
+ this.blockMaxApprox =
+ new BlockMaxDISI(new DisjunctionDISIApproximation(this.subScorers), this);
this.approximation = blockMaxApprox;
} else {
this.approximation = new DisjunctionDISIApproximation(this.subScorers);
@@ -105,12 +103,13 @@ abstract class DisjunctionScorer extends Scorer {
private TwoPhase(DocIdSetIterator approximation, float matchCost) {
super(approximation);
this.matchCost = matchCost;
- unverifiedMatches = new PriorityQueue<DisiWrapper>(DisjunctionScorer.this.subScorers.size()) {
- @Override
- protected boolean lessThan(DisiWrapper a, DisiWrapper b) {
- return a.matchCost < b.matchCost;
- }
- };
+ unverifiedMatches =
+ new PriorityQueue<DisiWrapper>(DisjunctionScorer.this.subScorers.size()) {
+ @Override
+ protected boolean lessThan(DisiWrapper a, DisiWrapper b) {
+ return a.matchCost < b.matchCost;
+ }
+ };
}
DisiWrapper getSubMatches() throws IOException {
@@ -124,20 +123,20 @@ abstract class DisjunctionScorer extends Scorer {
unverifiedMatches.clear();
return verifiedMatches;
}
-
+
@Override
public boolean matches() throws IOException {
verifiedMatches = null;
unverifiedMatches.clear();
-
+
for (DisiWrapper w = subScorers.topList(); w != null; ) {
DisiWrapper next = w.next;
-
+
if (w.twoPhaseView == null) {
// implicitly verified, move it to verifiedMatches
w.next = verifiedMatches;
verifiedMatches = w;
-
+
if (needsScores == false) {
// we can stop here
return true;
@@ -147,11 +146,11 @@ abstract class DisjunctionScorer extends Scorer {
}
w = next;
}
-
+
if (verifiedMatches != null) {
return true;
}
-
+
// verify subs that have an two-phase iterator
// least-costly ones first
while (unverifiedMatches.size() > 0) {
@@ -162,10 +161,10 @@ abstract class DisjunctionScorer extends Scorer {
return true;
}
}
-
+
return false;
}
-
+
@Override
public float matchCost() {
return matchCost;
@@ -174,7 +173,7 @@ abstract class DisjunctionScorer extends Scorer {
@Override
public final int docID() {
- return subScorers.top().doc;
+ return subScorers.top().doc;
}
BlockMaxDISI getBlockMaxApprox() {
@@ -205,5 +204,4 @@ abstract class DisjunctionScorer extends Scorer {
}
return children;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index 18835ed..6909af7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -16,19 +16,20 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.List;
-/** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
- */
+/** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>. */
final class DisjunctionSumScorer extends DisjunctionScorer {
- /** Construct a <code>DisjunctionScorer</code>.
+ /**
+ * Construct a <code>DisjunctionScorer</code>.
+ *
* @param weight The weight to be used.
* @param subScorers Array of at least two subscorers.
*/
- DisjunctionSumScorer(Weight weight, List<Scorer> subScorers, ScoreMode scoreMode) throws IOException {
+ DisjunctionSumScorer(Weight weight, List<Scorer> subScorers, ScoreMode scoreMode)
+ throws IOException {
super(weight, subScorers, scoreMode);
}
@@ -39,7 +40,7 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
for (DisiWrapper w = topList; w != null; w = w.next) {
score += w.scorer.score();
}
- return (float)score;
+ return (float) score;
}
@Override
@@ -48,5 +49,4 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
// we actually care about block scores.
return Float.MAX_VALUE;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java b/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
index 05870d9..aa2149b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocIdSet.java
@@ -16,41 +16,42 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
/**
- * A DocIdSet contains a set of doc ids. Implementing classes must
- * only implement {@link #iterator} to provide access to the set.
+ * A DocIdSet contains a set of doc ids. Implementing classes must only implement {@link #iterator}
+ * to provide access to the set.
*/
public abstract class DocIdSet implements Accountable {
/** An empty {@code DocIdSet} instance */
- public static final DocIdSet EMPTY = new DocIdSet() {
-
- @Override
- public DocIdSetIterator iterator() {
- return DocIdSetIterator.empty();
- }
-
- // we explicitly provide no random access, as this filter is 100% sparse and iterator exits faster
- @Override
- public Bits bits() {
- return null;
- }
+ public static final DocIdSet EMPTY =
+ new DocIdSet() {
+
+ @Override
+ public DocIdSetIterator iterator() {
+ return DocIdSetIterator.empty();
+ }
- @Override
- public long ramBytesUsed() {
- return 0L;
- }
- };
+ // we explicitly provide no random access, as this filter is 100% sparse and iterator exits
+ // faster
+ @Override
+ public Bits bits() {
+ return null;
+ }
- /** Provides a {@link DocIdSetIterator} to access the set.
- * This implementation can return <code>null</code> if there
- * are no docs that match. */
+ @Override
+ public long ramBytesUsed() {
+ return 0L;
+ }
+ };
+
+ /**
+ * Provides a {@link DocIdSetIterator} to access the set. This implementation can return <code>
+ * null</code> if there are no docs that match.
+ */
public abstract DocIdSetIterator iterator() throws IOException;
// TODO: somehow this class should express the cost of
@@ -60,21 +61,19 @@ public abstract class DocIdSet implements Accountable {
// this is the opposite of what bits() is for now
// (down-low filtering using e.g. FixedBitSet)
- /** Optionally provides a {@link Bits} interface for random access
- * to matching documents.
- * @return {@code null}, if this {@code DocIdSet} does not support random access.
- * In contrast to {@link #iterator()}, a return value of {@code null}
- * <b>does not</b> imply that no documents match the filter!
- * The default implementation does not provide random access, so you
- * only need to implement this method if your DocIdSet can
- * guarantee random access to every docid in O(1) time without
- * external disk access (as {@link Bits} interface cannot throw
- * {@link IOException}). This is generally true for bit sets
- * like {@link org.apache.lucene.util.FixedBitSet}, which return
- * itself if they are used as {@code DocIdSet}.
+ /**
+ * Optionally provides a {@link Bits} interface for random access to matching documents.
+ *
+ * @return {@code null}, if this {@code DocIdSet} does not support random access. In contrast to
+ * {@link #iterator()}, a return value of {@code null} <b>does not</b> imply that no documents
+ * match the filter! The default implementation does not provide random access, so you only
+ * need to implement this method if your DocIdSet can guarantee random access to every docid
+ * in O(1) time without external disk access (as {@link Bits} interface cannot throw {@link
+ * IOException}). This is generally true for bit sets like {@link
+ * org.apache.lucene.util.FixedBitSet}, which return itself if they are used as {@code
+ * DocIdSet}.
*/
public Bits bits() throws IOException {
return null;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
index 4b9bf26..cfde6b3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocIdSetIterator.java
@@ -16,23 +16,21 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
- * This abstract class defines methods to iterate over a set of non-decreasing
- * doc ids. Note that this class assumes it iterates on doc Ids, and therefore
- * {@link #NO_MORE_DOCS} is set to {@value #NO_MORE_DOCS} in order to be used as
- * a sentinel object. Implementations of this class are expected to consider
- * {@link Integer#MAX_VALUE} as an invalid value.
+ * This abstract class defines methods to iterate over a set of non-decreasing doc ids. Note that
+ * this class assumes it iterates on doc Ids, and therefore {@link #NO_MORE_DOCS} is set to {@value
+ * #NO_MORE_DOCS} in order to be used as a sentinel object. Implementations of this class are
+ * expected to consider {@link Integer#MAX_VALUE} as an invalid value.
*/
public abstract class DocIdSetIterator {
-
+
/** An empty {@code DocIdSetIterator} instance */
public static final DocIdSetIterator empty() {
return new DocIdSetIterator() {
boolean exhausted = false;
-
+
@Override
public int advance(int target) {
assert !exhausted;
@@ -40,18 +38,19 @@ public abstract class DocIdSetIterator {
exhausted = true;
return NO_MORE_DOCS;
}
-
+
@Override
public int docID() {
return exhausted ? NO_MORE_DOCS : -1;
}
+
@Override
public int nextDoc() {
assert !exhausted;
exhausted = true;
return NO_MORE_DOCS;
}
-
+
@Override
public long cost() {
return 0;
@@ -59,8 +58,7 @@ public abstract class DocIdSetIterator {
};
}
- /** A {@link DocIdSetIterator} that matches all documents up to
- * {@code maxDoc - 1}. */
+ /** A {@link DocIdSetIterator} that matches all documents up to {@code maxDoc - 1}. */
public static final DocIdSetIterator all(int maxDoc) {
return new DocIdSetIterator() {
int doc = -1;
@@ -91,11 +89,14 @@ public abstract class DocIdSetIterator {
};
}
- /** A {@link DocIdSetIterator} that matches a range documents from
- * minDocID (inclusive) to maxDocID (exclusive). */
+ /**
+ * A {@link DocIdSetIterator} that matches a range of documents from minDoc (inclusive) to
+ * maxDoc (exclusive).
+ */
public static final DocIdSetIterator range(int minDoc, int maxDoc) {
if (minDoc >= maxDoc) {
- throw new IllegalArgumentException("minDoc must be < maxDoc but got minDoc=" + minDoc + " maxDoc=" + maxDoc);
+ throw new IllegalArgumentException(
+ "minDoc must be < maxDoc but got minDoc=" + minDoc + " maxDoc=" + maxDoc);
}
if (minDoc < 0) {
throw new IllegalArgumentException("minDoc must be >= 0 but got minDoc=" + minDoc);
@@ -116,11 +117,11 @@ public abstract class DocIdSetIterator {
@Override
public int advance(int target) throws IOException {
if (target < minDoc) {
- doc = minDoc;
+ doc = minDoc;
} else if (target >= maxDoc) {
- doc = NO_MORE_DOCS;
+ doc = NO_MORE_DOCS;
} else {
- doc = target;
+ doc = target;
}
return doc;
}
@@ -133,49 +134,46 @@ public abstract class DocIdSetIterator {
}
/**
- * When returned by {@link #nextDoc()}, {@link #advance(int)} and
- * {@link #docID()} it means there are no more docs in the iterator.
+ * When returned by {@link #nextDoc()}, {@link #advance(int)} and {@link #docID()} it means there
+ * are no more docs in the iterator.
*/
public static final int NO_MORE_DOCS = Integer.MAX_VALUE;
/**
* Returns the following:
+ *
* <ul>
- * <li><code>-1</code> if {@link #nextDoc()} or
- * {@link #advance(int)} were not called yet.
- * <li>{@link #NO_MORE_DOCS} if the iterator has exhausted.
- * <li>Otherwise it should return the doc ID it is currently on.
+ * <li><code>-1</code> if {@link #nextDoc()} or {@link #advance(int)} were not called yet.
+ * <li>{@link #NO_MORE_DOCS} if the iterator has exhausted.
+ * <li>Otherwise it should return the doc ID it is currently on.
* </ul>
+ *
* <p>
- *
+ *
* @since 2.9
*/
public abstract int docID();
/**
- * Advances to the next document in the set and returns the doc it is
- * currently on, or {@link #NO_MORE_DOCS} if there are no more docs in the
- * set.<br>
- *
- * <b>NOTE:</b> after the iterator has exhausted you should not call this
- * method, as it may result in unpredicted behavior.
- *
+ * Advances to the next document in the set and returns the doc it is currently on, or {@link
+ * #NO_MORE_DOCS} if there are no more docs in the set.<br>
+ * <b>NOTE:</b> after the iterator has exhausted you should not call this method, as it may result
+ * in unpredicted behavior.
+ *
* @since 2.9
*/
public abstract int nextDoc() throws IOException;
- /**
- * Advances to the first beyond the current whose document number is greater
- * than or equal to <i>target</i>, and returns the document number itself.
- * Exhausts the iterator and returns {@link #NO_MORE_DOCS} if <i>target</i>
- * is greater than the highest document number in the set.
- * <p>
- * The behavior of this method is <b>undefined</b> when called with
- * <code> target ≤ current</code>, or after the iterator has exhausted.
- * Both cases may result in unpredicted behavior.
- * <p>
- * When <code> target > current</code> it behaves as if written:
- *
+ /**
+ * Advances to the first beyond the current whose document number is greater than or equal to
+ * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link
+ * #NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set.
+ *
+ * <p>The behavior of this method is <b>undefined</b> when called with <code> target ≤ current
+ * </code>, or after the iterator has exhausted. Both cases may result in unpredicted behavior.
+ *
+ * <p>When <code> target > current</code> it behaves as if written:
+ *
* <pre class="prettyprint">
* int advance(int target) {
* int doc;
@@ -184,21 +182,23 @@ public abstract class DocIdSetIterator {
* return doc;
* }
* </pre>
- *
+ *
* Some implementations are considerably more efficient than that.
- * <p>
- * <b>NOTE:</b> this method may be called with {@link #NO_MORE_DOCS} for
- * efficiency by some Scorers. If your implementation cannot efficiently
- * determine that it should exhaust, it is recommended that you check for that
- * value in each call to this method.
+ *
+ * <p><b>NOTE:</b> this method may be called with {@link #NO_MORE_DOCS} for efficiency by some
+ * Scorers. If your implementation cannot efficiently determine that it should exhaust, it is
+ * recommended that you check for that value in each call to this method.
+ *
* <p>
*
* @since 2.9
*/
public abstract int advance(int target) throws IOException;
- /** Slow (linear) implementation of {@link #advance} relying on
- * {@link #nextDoc()} to advance beyond the target position. */
+ /**
+ * Slow (linear) implementation of {@link #advance} relying on {@link #nextDoc()} to advance
+ * beyond the target position.
+ */
protected final int slowAdvance(int target) throws IOException {
assert docID() < target;
int doc;
@@ -210,11 +210,9 @@ public abstract class DocIdSetIterator {
/**
* Returns the estimated cost of this {@link DocIdSetIterator}.
- * <p>
- * This is generally an upper bound of the number of documents this iterator
- * might match, but may be a rough heuristic, hardcoded value, or otherwise
- * completely inaccurate.
+ *
+ * <p>This is generally an upper bound of the number of documents this iterator might match, but
+ * may be a rough heuristic, hardcoded value, or otherwise completely inaccurate.
*/
public abstract long cost();
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
index 1ac5752..851c5bf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesFieldExistsQuery.java
@@ -16,25 +16,22 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Objects;
-
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
/**
- * A {@link Query} that matches documents that have a value for a given field
- * as reported by doc values iterators.
+ * A {@link Query} that matches documents that have a value for a given field as reported by doc
+ * values iterators.
*/
public final class DocValuesFieldExistsQuery extends Query {
private final String field;
- /** Create a query that will match documents which have a value for the given
- * {@code field}. */
+ /** Create a query that will match documents which have a value for the given {@code field}. */
public DocValuesFieldExistsQuery(String field) {
this.field = Objects.requireNonNull(field);
}
@@ -45,8 +42,7 @@ public final class DocValuesFieldExistsQuery extends Query {
@Override
public boolean equals(Object other) {
- return sameClassAs(other) &&
- field.equals(((DocValuesFieldExistsQuery) other).field);
+ return sameClassAs(other) && field.equals(((DocValuesFieldExistsQuery) other).field);
}
@Override
@@ -82,15 +78,15 @@ public final class DocValuesFieldExistsQuery extends Query {
public boolean isCacheable(LeafReaderContext ctx) {
return DocValues.isCacheable(ctx, field);
}
-
};
}
/**
- * Returns a {@link DocIdSetIterator} from the given field or null if the field doesn't exist
- * in the reader or if the reader has no doc values for the field.
+ * Returns a {@link DocIdSetIterator} from the given field or null if the field doesn't exist in
+ * the reader or if the reader has no doc values for the field.
*/
- public static DocIdSetIterator getDocValuesDocIdSetIterator(String field, LeafReader reader) throws IOException {
+ public static DocIdSetIterator getDocValuesDocIdSetIterator(String field, LeafReader reader)
+ throws IOException {
FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
final DocIdSetIterator iterator;
if (fieldInfo != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
index e431e50..f1dadb5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
@@ -16,9 +16,7 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -29,47 +27,47 @@ import org.apache.lucene.util.LongBitSet;
/**
* Rewrites MultiTermQueries into a filter, using DocValues for term enumeration.
- * <p>
- * This can be used to perform these queries against an unindexed docvalues field.
+ *
+ * <p>This can be used to perform these queries against an unindexed docvalues field.
+ *
* @lucene.experimental
*/
public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
-
+
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) {
return new ConstantScoreQuery(new MultiTermQueryDocValuesWrapper(query));
}
-
+
static class MultiTermQueryDocValuesWrapper extends Query {
-
+
protected final MultiTermQuery query;
-
- /**
- * Wrap a {@link MultiTermQuery} as a Filter.
- */
+
+ /** Wrap a {@link MultiTermQuery} as a Filter. */
protected MultiTermQueryDocValuesWrapper(MultiTermQuery query) {
this.query = query;
}
-
+
@Override
public String toString(String field) {
// query.toString should be ok for the filter, too, if the query boost is 1.0f
return query.toString(field);
}
-
+
@Override
public final boolean equals(final Object other) {
- return sameClassAs(other) &&
- query.equals(((MultiTermQueryDocValuesWrapper) other).query);
+ return sameClassAs(other) && query.equals(((MultiTermQueryDocValuesWrapper) other).query);
}
@Override
public final int hashCode() {
return 31 * classHash() + query.hashCode();
}
-
+
/** Returns the field name for this query */
- public final String getField() { return query.getField(); }
+ public final String getField() {
+ return query.getField();
+ }
@Override
public void visit(QueryVisitor visitor) {
@@ -77,65 +75,71 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
visitor.getSubVisitor(BooleanClause.Occur.FILTER, query);
}
}
-
+
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
final SortedSetDocValues fcsi = DocValues.getSortedSet(context.reader(), query.field);
- return MatchesUtils.forField(query.field, () -> DisjunctionMatchesIterator.fromTermsEnum(context, doc, query, query.field, getTermsEnum(fcsi)));
+ return MatchesUtils.forField(
+ query.field,
+ () ->
+ DisjunctionMatchesIterator.fromTermsEnum(
+ context, doc, query, query.field, getTermsEnum(fcsi)));
}
private TermsEnum getTermsEnum(SortedSetDocValues fcsi) throws IOException {
- return query.getTermsEnum(new Terms() {
+ return query.getTermsEnum(
+ new Terms() {
- @Override
- public TermsEnum iterator() throws IOException {
- return fcsi.termsEnum();
- }
+ @Override
+ public TermsEnum iterator() throws IOException {
+ return fcsi.termsEnum();
+ }
- @Override
- public long getSumTotalTermFreq() {
- throw new UnsupportedOperationException();
- }
+ @Override
+ public long getSumTotalTermFreq() {
+ throw new UnsupportedOperationException();
+ }
- @Override
- public long getSumDocFreq() {
- throw new UnsupportedOperationException();
- }
+ @Override
+ public long getSumDocFreq() {
+ throw new UnsupportedOperationException();
+ }
- @Override
- public int getDocCount() {
- throw new UnsupportedOperationException();
- }
+ @Override
+ public int getDocCount() {
+ throw new UnsupportedOperationException();
+ }
- @Override
- public long size() {
- return -1;
- }
+ @Override
+ public long size() {
+ return -1;
+ }
- @Override
- public boolean hasFreqs() {
- return false;
- }
+ @Override
+ public boolean hasFreqs() {
+ return false;
+ }
- @Override
- public boolean hasOffsets() {
- return false;
- }
+ @Override
+ public boolean hasOffsets() {
+ return false;
+ }
- @Override
- public boolean hasPositions() {
- return false;
- }
+ @Override
+ public boolean hasPositions() {
+ return false;
+ }
- @Override
- public boolean hasPayloads() {
- return false;
- }
- });
+ @Override
+ public boolean hasPayloads() {
+ return false;
+ }
+ });
}
@Override
@@ -157,38 +161,42 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
}
} while (termsEnum.next() != null);
- return new ConstantScoreScorer(this, score(), scoreMode, new TwoPhaseIterator(fcsi) {
-
- @Override
- public boolean matches() throws IOException {
- for (long ord = fcsi.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = fcsi.nextOrd()) {
- if (termSet.get(ord)) {
- return true;
+ return new ConstantScoreScorer(
+ this,
+ score(),
+ scoreMode,
+ new TwoPhaseIterator(fcsi) {
+
+ @Override
+ public boolean matches() throws IOException {
+ for (long ord = fcsi.nextOrd();
+ ord != SortedSetDocValues.NO_MORE_ORDS;
+ ord = fcsi.nextOrd()) {
+ if (termSet.get(ord)) {
+ return true;
+ }
+ }
+ return false;
}
- }
- return false;
- }
- @Override
- public float matchCost() {
- return 3; // lookup in a bitset
- }
- });
+ @Override
+ public float matchCost() {
+ return 3; // lookup in a bitset
+ }
+ });
}
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return DocValues.isCacheable(ctx, query.field);
}
-
};
}
}
@Override
public boolean equals(Object other) {
- return other != null &&
- getClass() == other.getClass();
+ return other != null && getClass() == other.getClass();
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/DoubleValues.java b/lucene/core/src/java/org/apache/lucene/search/DoubleValues.java
index 3a02970..4ccf8eb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DoubleValues.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DoubleValues.java
@@ -19,25 +19,20 @@ package org.apache.lucene.search;
import java.io.IOException;
-/**
- * Per-segment, per-document double values, which can be calculated at search-time
- */
+/** Per-segment, per-document double values, which can be calculated at search-time */
public abstract class DoubleValues {
- /**
- * Get the double value for the current document
- */
+ /** Get the double value for the current document */
public abstract double doubleValue() throws IOException;
/**
* Advance this instance to the given document id
+ *
* @return true if there is a value for this document
*/
public abstract boolean advanceExact(int doc) throws IOException;
- /**
- * Wrap a DoubleValues instance, returning a default if the wrapped instance has no value
- */
+ /** Wrap a DoubleValues instance, returning a default if the wrapped instance has no value */
public static DoubleValues withDefault(DoubleValues in, double missingValue) {
return new DoubleValues() {
@@ -57,18 +52,19 @@ public abstract class DoubleValues {
}
/**
- * An empty DoubleValues instance that always returns {@code false} from {@link #advanceExact(int)}
+ * An empty DoubleValues instance that always returns {@code false} from {@link
+ * #advanceExact(int)}
*/
- public static final DoubleValues EMPTY = new DoubleValues() {
- @Override
- public double doubleValue() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public boolean advanceExact(int doc) throws IOException {
- return false;
- }
- };
+ public static final DoubleValues EMPTY =
+ new DoubleValues() {
+ @Override
+ public double doubleValue() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+ @Override
+ public boolean advanceExact(int doc) throws IOException {
+ return false;
+ }
+ };
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
index 368bc11..df21726 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DoubleValuesSource.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.Objects;
import java.util.function.DoubleToLongFunction;
import java.util.function.LongToDoubleFunction;
-
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -31,33 +30,32 @@ import org.apache.lucene.search.comparators.DoubleComparator;
/**
* Base class for producing {@link DoubleValues}
*
- * To obtain a {@link DoubleValues} object for a leaf reader, clients should call
- * {@link #rewrite(IndexSearcher)} against the top-level searcher, and then
- * call {@link #getValues(LeafReaderContext, DoubleValues)} on the resulting
- * DoubleValuesSource.
+ * <p>To obtain a {@link DoubleValues} object for a leaf reader, clients should call {@link
+ * #rewrite(IndexSearcher)} against the top-level searcher, and then call {@link
+ * #getValues(LeafReaderContext, DoubleValues)} on the resulting DoubleValuesSource.
*
- * DoubleValuesSource objects for NumericDocValues fields can be obtained by calling
- * {@link #fromDoubleField(String)}, {@link #fromFloatField(String)}, {@link #fromIntField(String)}
- * or {@link #fromLongField(String)}, or from {@link #fromField(String, LongToDoubleFunction)} if
+ * <p>DoubleValuesSource objects for NumericDocValues fields can be obtained by calling {@link
+ * #fromDoubleField(String)}, {@link #fromFloatField(String)}, {@link #fromIntField(String)} or
+ * {@link #fromLongField(String)}, or from {@link #fromField(String, LongToDoubleFunction)} if
* special long-to-double encoding is required.
*
- * Scores may be used as a source for value calculations by wrapping a {@link Scorer} using
- * {@link #fromScorer(Scorable)} and passing the resulting DoubleValues to {@link #getValues(LeafReaderContext, DoubleValues)}.
- * The scores can then be accessed using the {@link #SCORES} DoubleValuesSource.
+ * <p>Scores may be used as a source for value calculations by wrapping a {@link Scorer} using
+ * {@link #fromScorer(Scorable)} and passing the resulting DoubleValues to {@link
+ * #getValues(LeafReaderContext, DoubleValues)}. The scores can then be accessed using the {@link
+ * #SCORES} DoubleValuesSource.
*/
public abstract class DoubleValuesSource implements SegmentCacheable {
/**
* Returns a {@link DoubleValues} instance for the passed-in LeafReaderContext and scores
*
- * If scores are not needed to calculate the values (ie {@link #needsScores() returns false}, callers
- * may safely pass {@code null} for the {@code scores} parameter.
+ * <p>If scores are not needed to calculate the values (ie. {@link #needsScores()} returns
+ * {@code false}), callers may safely pass {@code null} for the {@code scores} parameter.
*/
- public abstract DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException;
+ public abstract DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores)
+ throws IOException;
- /**
- * Return true if document scores are needed to calculate values
- */
+ /** Return true if document scores are needed to calculate values */
public abstract boolean needsScores();
/**
@@ -68,30 +66,35 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
* @return an Explanation for the value
* @throws IOException if an {@link IOException} occurs
*/
- public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
- DoubleValues dv = getValues(ctx, DoubleValuesSource.constant(scoreExplanation.getValue().doubleValue()).getValues(ctx, null));
- if (dv.advanceExact(docId))
- return Explanation.match(dv.doubleValue(), this.toString());
+ public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation)
+ throws IOException {
+ DoubleValues dv =
+ getValues(
+ ctx,
+ DoubleValuesSource.constant(scoreExplanation.getValue().doubleValue())
+ .getValues(ctx, null));
+ if (dv.advanceExact(docId)) return Explanation.match(dv.doubleValue(), this.toString());
return Explanation.noMatch(this.toString());
}
/**
* Return a DoubleValuesSource specialised for the given IndexSearcher
*
- * Implementations should assume that this will only be called once.
- * IndexReader-independent implementations can just return {@code this}
+ * <p>Implementations should assume that this will only be called once. IndexReader-independent
+ * implementations can just return {@code this}
*
- * Queries that use DoubleValuesSource objects should call rewrite() during
- * {@link Query#createWeight(IndexSearcher, ScoreMode, float)} rather than during
- * {@link Query#rewrite(IndexReader)} to avoid IndexReader reference leakage.
+ * <p>Queries that use DoubleValuesSource objects should call rewrite() during {@link
+ * Query#createWeight(IndexSearcher, ScoreMode, float)} rather than during {@link
+ * Query#rewrite(IndexReader)} to avoid IndexReader reference leakage.
*
- * For the same reason, implementations that cache references to the IndexSearcher
- * should return a new object from this method.
+ * <p>For the same reason, implementations that cache references to the IndexSearcher should
+ * return a new object from this method.
*/
public abstract DoubleValuesSource rewrite(IndexSearcher reader) throws IOException;
/**
* Create a sort field based on the value of this producer
+ *
* @param reverse true if the sort should be decreasing
*/
public SortField getSortField(boolean reverse) {
@@ -107,9 +110,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
@Override
public abstract String toString();
- /**
- * Convert to a LongValuesSource by casting the double values to longs
- */
+ /** Convert to a LongValuesSource by casting the double values to longs */
public final LongValuesSource toLongValuesSource() {
return new LongDoubleValuesSource(this);
}
@@ -170,7 +171,6 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
public LongValuesSource rewrite(IndexSearcher searcher) throws IOException {
return inner.rewrite(searcher).toLongValuesSource();
}
-
}
/**
@@ -183,30 +183,22 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
return new FieldValuesSource(field, decoder);
}
- /**
- * Creates a DoubleValuesSource that wraps a double-valued field
- */
+ /** Creates a DoubleValuesSource that wraps a double-valued field */
public static DoubleValuesSource fromDoubleField(String field) {
return fromField(field, Double::longBitsToDouble);
}
- /**
- * Creates a DoubleValuesSource that wraps a float-valued field
- */
+ /** Creates a DoubleValuesSource that wraps a float-valued field */
public static DoubleValuesSource fromFloatField(String field) {
- return fromField(field, (v) -> (double)Float.intBitsToFloat((int)v));
+ return fromField(field, (v) -> (double) Float.intBitsToFloat((int) v));
}
- /**
- * Creates a DoubleValuesSource that wraps a long-valued field
- */
+ /** Creates a DoubleValuesSource that wraps a long-valued field */
public static DoubleValuesSource fromLongField(String field) {
return fromField(field, (v) -> (double) v);
}
- /**
- * Creates a DoubleValuesSource that wraps an int-valued field
- */
+ /** Creates a DoubleValuesSource that wraps an int-valued field */
public static DoubleValuesSource fromIntField(String field) {
return fromLongField(field);
}
@@ -214,55 +206,56 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
/**
* A DoubleValuesSource that exposes a document's score
*
- * If this source is used as part of a values calculation, then callers must not
- * pass {@code null} as the {@link DoubleValues} parameter on {@link #getValues(LeafReaderContext, DoubleValues)}
+ * <p>If this source is used as part of a values calculation, then callers must not pass {@code
+ * null} as the {@link DoubleValues} parameter on {@link #getValues(LeafReaderContext,
+ * DoubleValues)}
*/
- public static final DoubleValuesSource SCORES = new DoubleValuesSource() {
- @Override
- public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
- assert scores != null;
- return scores;
- }
+ public static final DoubleValuesSource SCORES =
+ new DoubleValuesSource() {
+ @Override
+ public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores)
+ throws IOException {
+ assert scores != null;
+ return scores;
+ }
- @Override
- public boolean needsScores() {
- return true;
- }
+ @Override
+ public boolean needsScores() {
+ return true;
+ }
- @Override
- public boolean isCacheable(LeafReaderContext ctx) {
- return false;
- }
+ @Override
+ public boolean isCacheable(LeafReaderContext ctx) {
+ return false;
+ }
- @Override
- public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) {
- return scoreExplanation;
- }
+ @Override
+ public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) {
+ return scoreExplanation;
+ }
- @Override
- public int hashCode() {
- return 0;
- }
+ @Override
+ public int hashCode() {
+ return 0;
+ }
- @Override
- public boolean equals(Object obj) {
- return obj == this;
- }
+ @Override
+ public boolean equals(Object obj) {
+ return obj == this;
+ }
- @Override
- public String toString() {
- return "scores";
- }
+ @Override
+ public String toString() {
+ return "scores";
+ }
- @Override
- public DoubleValuesSource rewrite(IndexSearcher searcher) {
- return this;
- }
- };
+ @Override
+ public DoubleValuesSource rewrite(IndexSearcher searcher) {
+ return this;
+ }
+ };
- /**
- * Creates a DoubleValuesSource that always returns a constant value
- */
+ /** Creates a DoubleValuesSource that always returns a constant value */
public static DoubleValuesSource constant(double value) {
return new ConstantValuesSource(value);
}
@@ -274,17 +267,18 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
private ConstantValuesSource(double value) {
this.value = value;
- this.doubleValues = new DoubleValues() {
- @Override
- public double doubleValue() {
- return value;
- }
+ this.doubleValues =
+ new DoubleValues() {
+ @Override
+ public double doubleValue() {
+ return value;
+ }
- @Override
- public boolean advanceExact(int doc) {
- return true;
- }
- };
+ @Override
+ public boolean advanceExact(int doc) {
+ return true;
+ }
+ };
}
@Override
@@ -292,7 +286,6 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
return this;
}
-
@Override
public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
return doubleValues;
@@ -303,7 +296,6 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
return false;
}
-
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return true;
@@ -331,12 +323,9 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
public String toString() {
return "constant(" + value + ")";
}
-
}
- /**
- * Returns a DoubleValues instance that wraps scores returned by a Scorer
- */
+ /** Returns a DoubleValues instance that wraps scores returned by a Scorer */
public static DoubleValues fromScorer(Scorable scorer) {
return new DoubleValues() {
@Override
@@ -367,8 +356,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FieldValuesSource that = (FieldValuesSource) o;
- return Objects.equals(field, that.field) &&
- Objects.equals(decoder, that.decoder);
+ return Objects.equals(field, that.field) && Objects.equals(decoder, that.decoder);
}
@Override
@@ -408,18 +396,17 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
}
@Override
- public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
+ public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation)
+ throws IOException {
DoubleValues values = getValues(ctx, null);
if (values.advanceExact(docId))
return Explanation.match(values.doubleValue(), this.toString());
- else
- return Explanation.noMatch(this.toString());
+ else return Explanation.noMatch(this.toString());
}
public DoubleValuesSource rewrite(IndexSearcher searcher) throws IOException {
return this;
}
-
}
private static class DoubleValuesSortField extends SortField {
@@ -435,7 +422,8 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
public void setMissingValue(Object missingValue) {
if (missingValue instanceof Number) {
this.missingValue = missingValue;
- ((DoubleValuesComparatorSource) getComparatorSource()).setMissingValue(((Number) missingValue).doubleValue());
+ ((DoubleValuesComparatorSource) getComparatorSource())
+ .setMissingValue(((Number) missingValue).doubleValue());
} else {
super.setMissingValue(missingValue);
}
@@ -450,8 +438,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
public String toString() {
StringBuilder buffer = new StringBuilder("<");
buffer.append(getField()).append(">");
- if (reverse)
- buffer.append("!");
+ if (reverse) buffer.append("!");
return buffer.toString();
}
@@ -487,8 +474,8 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
}
@Override
- public FieldComparator<Double> newComparator(String fieldname, int numHits,
- int sortPos, boolean reversed) {
+ public FieldComparator<Double> newComparator(
+ String fieldname, int numHits, int sortPos, boolean reversed) {
return new DoubleComparator(numHits, fieldname, missingValue, reversed, sortPos) {
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
@@ -496,9 +483,10 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
return new DoubleComparator.DoubleLeafComparator(context) {
LeafReaderContext ctx;
-
+
@Override
- protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) {
+ protected NumericDocValues getNumericDocValues(
+ LeafReaderContext context, String field) {
ctx = context;
return asNumericDocValues(holder, Double::doubleToLongBits);
}
@@ -514,7 +502,8 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
}
}
- private static NumericDocValues asNumericDocValues(DoubleValuesHolder in, DoubleToLongFunction converter) {
+ private static NumericDocValues asNumericDocValues(
+ DoubleValuesHolder in, DoubleToLongFunction converter) {
return new NumericDocValues() {
@Override
public long longValue() throws IOException {
@@ -548,9 +537,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
};
}
- /**
- * Create a DoubleValuesSource that returns the score of a particular query
- */
+ /** Create a DoubleValuesSource that returns the score of a particular query */
public static DoubleValuesSource fromQuery(Query query) {
return new QueryDoubleValuesSource(query);
}
@@ -579,7 +566,6 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
@Override
public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
throw new UnsupportedOperationException("This DoubleValuesSource must be rewritten");
-
}
@Override
@@ -589,7 +575,8 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
@Override
public DoubleValuesSource rewrite(IndexSearcher searcher) throws IOException {
- return new WeightDoubleValuesSource(searcher.rewrite(query).createWeight(searcher, ScoreMode.COMPLETE, 1f));
+ return new WeightDoubleValuesSource(
+ searcher.rewrite(query).createWeight(searcher, ScoreMode.COMPLETE, 1f));
}
@Override
@@ -614,12 +601,12 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
@Override
public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
Scorer scorer = weight.scorer(ctx);
- if (scorer == null)
- return DoubleValues.EMPTY;
+ if (scorer == null) return DoubleValues.EMPTY;
return new DoubleValues() {
private final TwoPhaseIterator tpi = scorer.twoPhaseIterator();
- private final DocIdSetIterator disi = (tpi == null) ? scorer.iterator() : tpi.approximation();
+ private final DocIdSetIterator disi =
+ (tpi == null) ? scorer.iterator() : tpi.approximation();
@Override
public double doubleValue() throws IOException {
@@ -637,7 +624,8 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
}
@Override
- public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException {
+ public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation)
+ throws IOException {
return weight.explain(ctx, docId);
}
@@ -674,5 +662,4 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
return false;
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
index a61f4fc..9818af9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -24,7 +23,6 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
-
import org.apache.lucene.index.Impact;
import org.apache.lucene.index.Impacts;
import org.apache.lucene.index.ImpactsEnum;
@@ -33,7 +31,9 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.PriorityQueue;
-/** Expert: Find exact phrases
+/**
+ * Expert: Find exact phrases
+ *
* @lucene.internal
*/
public final class ExactPhraseMatcher extends PhraseMatcher {
@@ -54,24 +54,33 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
private final ImpactsDISI impactsApproximation;
/** Expert: Creates ExactPhraseMatcher instance */
- public ExactPhraseMatcher(PhraseQuery.PostingsAndFreq[] postings, ScoreMode scoreMode, SimScorer scorer, float matchCost) {
+ public ExactPhraseMatcher(
+ PhraseQuery.PostingsAndFreq[] postings,
+ ScoreMode scoreMode,
+ SimScorer scorer,
+ float matchCost) {
super(matchCost);
- final DocIdSetIterator approximation = ConjunctionDISI.intersectIterators(Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList()));
- final ImpactsSource impactsSource = mergeImpacts(Arrays.stream(postings).map(p -> p.impacts).toArray(ImpactsEnum[]::new));
+ final DocIdSetIterator approximation =
+ ConjunctionDISI.intersectIterators(
+ Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList()));
+ final ImpactsSource impactsSource =
+ mergeImpacts(Arrays.stream(postings).map(p -> p.impacts).toArray(ImpactsEnum[]::new));
if (scoreMode == ScoreMode.TOP_SCORES) {
- this.approximation = this.impactsApproximation = new ImpactsDISI(approximation, impactsSource, scorer);
+ this.approximation =
+ this.impactsApproximation = new ImpactsDISI(approximation, impactsSource, scorer);
} else {
this.approximation = approximation;
this.impactsApproximation = new ImpactsDISI(approximation, impactsSource, scorer);
}
List<PostingsAndPosition> postingsAndPositions = new ArrayList<>();
- for(PhraseQuery.PostingsAndFreq posting : postings) {
+ for (PhraseQuery.PostingsAndFreq posting : postings) {
postingsAndPositions.add(new PostingsAndPosition(posting.postings, posting.position));
}
- this.postings = postingsAndPositions.toArray(new PostingsAndPosition[postingsAndPositions.size()]);
+ this.postings =
+ postingsAndPositions.toArray(new PostingsAndPosition[postingsAndPositions.size()]);
}
@Override
@@ -93,10 +102,12 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
return minFreq;
}
- /** Advance the given pos enum to the first doc on or after {@code target}.
- * Return {@code false} if the enum was exhausted before reaching
- * {@code target} and {@code true} otherwise. */
- private static boolean advancePosition(PostingsAndPosition posting, int target) throws IOException {
+ /**
+ * Advance the given pos enum to the first doc on or after {@code target}. Return {@code false} if
+ * the enum was exhausted before reaching {@code target} and {@code true} otherwise.
+ */
+ private static boolean advancePosition(PostingsAndPosition posting, int target)
+ throws IOException {
while (posting.pos < target) {
if (posting.upTo == posting.freq) {
return false;
@@ -123,8 +134,7 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
if (lead.upTo < lead.freq) {
lead.pos = lead.postings.nextPosition();
lead.upTo += 1;
- }
- else {
+ } else {
return false;
}
advanceHead:
@@ -177,9 +187,7 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
return postings[postings.length - 1].postings.endOffset();
}
- /**
- * Merge impacts for multiple terms of an exact phrase.
- */
+ /** Merge impacts for multiple terms of an exact phrase. */
static ImpactsSource mergeImpacts(ImpactsEnum[] impactsEnums) {
// Iteration of block boundaries uses the impacts enum with the lower cost.
// This is consistent with BlockMaxConjunctionScorer.
@@ -235,8 +243,8 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
}
/**
- * Return the minimum level whose impacts are valid up to {@code docIdUpTo},
- * or {@code -1} if there is no such level.
+ * Return the minimum level whose impacts are valid up to {@code docIdUpTo}, or {@code -1}
+ * if there is no such level.
*/
private int getLevel(Impacts impacts, int docIdUpTo) {
for (int level = 0, numLevels = impacts.numLevels(); level < numLevels; ++level) {
@@ -251,12 +259,13 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
public List<Impact> getImpacts(int level) {
final int docIdUpTo = getDocIdUpTo(level);
- PriorityQueue<SubIterator> pq = new PriorityQueue<SubIterator>(impacts.length) {
- @Override
- protected boolean lessThan(SubIterator a, SubIterator b) {
- return a.current.freq < b.current.freq;
- }
- };
+ PriorityQueue<SubIterator> pq =
+ new PriorityQueue<SubIterator>(impacts.length) {
+ @Override
+ protected boolean lessThan(SubIterator a, SubIterator b) {
+ return a.current.freq < b.current.freq;
+ }
+ };
boolean hasImpacts = false;
List<Impact> onlyImpactList = null;
@@ -309,8 +318,10 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
}
}
- outer: while (true) {
- if (mergedImpacts.size() > 0 && mergedImpacts.get(mergedImpacts.size() - 1).norm == currentNorm) {
+ outer:
+ while (true) {
+ if (mergedImpacts.size() > 0
+ && mergedImpacts.get(mergedImpacts.size() - 1).norm == currentNorm) {
mergedImpacts.get(mergedImpacts.size() - 1).freq = currentFreq;
} else {
mergedImpacts.add(new Impact(currentFreq, currentNorm));
@@ -345,5 +356,4 @@ public final class ExactPhraseMatcher extends PhraseMatcher {
}
};
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/Explanation.java b/lucene/core/src/java/org/apache/lucene/search/Explanation.java
index f5d2ec4..460a20e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Explanation.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Explanation.java
@@ -28,45 +28,45 @@ public final class Explanation {
/**
* Create a new explanation for a match.
- * @param value the contribution to the score of the document
+ *
+ * @param value the contribution to the score of the document
* @param description how {@code value} was computed
- * @param details sub explanations that contributed to this explanation
+ * @param details sub explanations that contributed to this explanation
*/
- public static Explanation match(Number value, String description, Collection<Explanation> details) {
+ public static Explanation match(
+ Number value, String description, Collection<Explanation> details) {
return new Explanation(true, value, description, details);
}
/**
* Create a new explanation for a match.
- * @param value the contribution to the score of the document
+ *
+ * @param value the contribution to the score of the document
* @param description how {@code value} was computed
- * @param details sub explanations that contributed to this explanation
+ * @param details sub explanations that contributed to this explanation
*/
public static Explanation match(Number value, String description, Explanation... details) {
return new Explanation(true, value, description, Arrays.asList(details));
}
- /**
- * Create a new explanation for a document which does not match.
- */
+ /** Create a new explanation for a document which does not match. */
public static Explanation noMatch(String description, Collection<Explanation> details) {
return new Explanation(false, 0f, description, details);
}
- /**
- * Create a new explanation for a document which does not match.
- */
+ /** Create a new explanation for a document which does not match. */
public static Explanation noMatch(String description, Explanation... details) {
return new Explanation(false, 0f, description, Arrays.asList(details));
}
- private final boolean match; // whether the document matched
- private final Number value; // the value of this node
- private final String description; // what it represents
- private final List<Explanation> details; // sub-explanations
+ private final boolean match; // whether the document matched
+ private final Number value; // the value of this node
+ private final String description; // what it represents
+ private final List<Explanation> details; // sub-explanations
- /** Create a new explanation */
- private Explanation(boolean match, Number value, String description, Collection<Explanation> details) {
+ /** Create a new explanation */
+ private Explanation(
+ boolean match, Number value, String description, Collection<Explanation> details) {
this.match = match;
this.value = Objects.requireNonNull(value);
this.description = Objects.requireNonNull(description);
@@ -76,23 +76,25 @@ public final class Explanation {
}
}
- /**
- * Indicates whether or not this Explanation models a match.
- */
+ /** Indicates whether or not this Explanation models a match. */
public boolean isMatch() {
return match;
}
-
+
/** The value assigned to this explanation node. */
- public Number getValue() { return value; }
+ public Number getValue() {
+ return value;
+ }
/** A description of this explanation node. */
- public String getDescription() { return description; }
+ public String getDescription() {
+ return description;
+ }
private String getSummary() {
return getValue() + " = " + getDescription();
}
-
+
/** The sub-nodes of this explanation node. */
public Explanation[] getDetails() {
return details.toArray(new Explanation[0]);
@@ -113,8 +115,8 @@ public final class Explanation {
buffer.append("\n");
Explanation[] details = getDetails();
- for (int i = 0 ; i < details.length; i++) {
- buffer.append(details[i].toString(depth+1));
+ for (int i = 0; i < details.length; i++) {
+ buffer.append(details[i].toString(depth + 1));
}
return buffer.toString();
@@ -125,15 +127,14 @@ public final class Explanation {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Explanation that = (Explanation) o;
- return match == that.match &&
- Objects.equals(value, that.value) &&
- Objects.equals(description, that.description) &&
- Objects.equals(details, that.details);
+ return match == that.match
+ && Objects.equals(value, that.value)
+ && Objects.equals(description, that.description)
+ && Objects.equals(details, that.details);
}
@Override
public int hashCode() {
return Objects.hash(match, value, description, details);
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
index 9534998..90db5ac 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparator.java
@@ -16,10 +16,7 @@
*/
package org.apache.lucene.search;
-
-
import java.io.IOException;
-
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
@@ -28,46 +25,32 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
/**
- * Expert: a FieldComparator compares hits so as to determine their
- * sort order when collecting the top results with {@link
- * TopFieldCollector}. The concrete public FieldComparator
- * classes here correspond to the SortField types.
+ * Expert: a FieldComparator compares hits so as to determine their sort order when collecting the
+ * top results with {@link TopFieldCollector}. The concrete public FieldComparator classes here
+ * correspond to the SortField types.
*
- * <p>The document IDs passed to these methods must only
- * move forwards, since they are using doc values iterators
- * to retrieve sort values.</p>
+ * <p>The document IDs passed to these methods must only move forwards, since they are using doc
+ * values iterators to retrieve sort values.
*
- * <p>This API is designed to achieve high performance
- * sorting, by exposing a tight interaction with {@link
- * FieldValueHitQueue} as it visits hits. Whenever a hit is
- * competitive, it's enrolled into a virtual slot, which is
- * an int ranging from 0 to numHits-1. Segment transitions are
- * handled by creating a dedicated per-segment
- * {@link LeafFieldComparator} which also needs to interact
- * with the {@link FieldValueHitQueue} but can optimize based
- * on the segment to collect.</p>
- *
- * <p>The following functions need to be implemented</p>
- * <ul>
- * <li> {@link #compare} Compare a hit at 'slot a'
- * with hit 'slot b'.
- *
- * <li> {@link #setTopValue} This method is called by
- * {@link TopFieldCollector} to notify the
- * FieldComparator of the top most value, which is
- * used by future calls to
- * {@link LeafFieldComparator#compareTop}.
- *
- * <li> {@link #getLeafComparator(org.apache.lucene.index.LeafReaderContext)} Invoked
- * when the search is switching to the next segment.
- * You may need to update internal state of the
- * comparator, for example retrieving new values from
- * DocValues.
+ * <p>This API is designed to achieve high performance sorting, by exposing a tight interaction with
+ * {@link FieldValueHitQueue} as it visits hits. Whenever a hit is competitive, it's enrolled into a
+ * virtual slot, which is an int ranging from 0 to numHits-1. Segment transitions are handled by
+ * creating a dedicated per-segment {@link LeafFieldComparator} which also needs to interact with
+ * the {@link FieldValueHitQueue} but can optimize based on the segment to collect.
*
- * <li> {@link #value} Return the sort value stored in
- * the specified slot. This is only called at the end
- * of the search, in order to populate {@link
- * FieldDoc#fields} when returning the top results.
+ * <p>The following functions need to be implemented
+ *
+ * <ul>
+ * <li>{@link #compare} Compare a hit at 'slot a' with hit 'slot b'.
+ * <li>{@link #setTopValue} This method is called by {@link TopFieldCollector} to notify the
+ * FieldComparator of the top most value, which is used by future calls to {@link
+ * LeafFieldComparator#compareTop}.
+ * <li>{@link #getLeafComparator(org.apache.lucene.index.LeafReaderContext)} Invoked when the
+ * search is switching to the next segment. You may need to update internal state of the
+ * comparator, for example retrieving new values from DocValues.
+ * <li>{@link #value} Return the sort value stored in the specified slot. This is only called at
+ * the end of the search, in order to populate {@link FieldDoc#fields} when returning the top
+ * results.
* </ul>
*
* @see LeafFieldComparator
@@ -77,20 +60,18 @@ public abstract class FieldComparator<T> {
/**
* Compare hit at slot1 with hit at slot2.
- *
+ *
* @param slot1 first slot to compare
* @param slot2 second slot to compare
- * @return any {@code N < 0} if slot2's value is sorted after
- * slot1, any {@code N > 0} if the slot2's value is sorted before
- * slot1 and {@code 0} if they are equal
+ * @return any {@code N < 0} if slot2's value is sorted after slot1, any {@code N > 0} if the
+ * slot2's value is sorted before slot1 and {@code 0} if they are equal
*/
public abstract int compare(int slot1, int slot2);
/**
- * Record the top value, for future calls to {@link
- * LeafFieldComparator#compareTop}. This is only called for searches that
- * use searchAfter (deep paging), and is called before any
- * calls to {@link #getLeafComparator(LeafReaderContext)}.
+ * Record the top value, for future calls to {@link LeafFieldComparator#compareTop}. This is only
+ * called for searches that use searchAfter (deep paging), and is called before any calls to
+ * {@link #getLeafComparator(LeafReaderContext)}.
*/
public abstract void setTopValue(T value);
@@ -103,23 +84,24 @@ public abstract class FieldComparator<T> {
public abstract T value(int slot);
/**
- * Get a per-segment {@link LeafFieldComparator} to collect the given
- * {@link org.apache.lucene.index.LeafReaderContext}. All docIDs supplied to
- * this {@link LeafFieldComparator} are relative to the current reader (you
- * must add docBase if you need to map it to a top-level docID).
- *
+ * Get a per-segment {@link LeafFieldComparator} to collect the given {@link
+ * org.apache.lucene.index.LeafReaderContext}. All docIDs supplied to this {@link
+ * LeafFieldComparator} are relative to the current reader (you must add docBase if you need to
+ * map it to a top-level docID).
+ *
* @param context current reader context
* @return the comparator to use for this segment
* @throws IOException if there is a low-level IO error
*/
- public abstract LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException;
-
- /** Returns a negative integer if first is less than second,
- * 0 if they are equal and a positive integer otherwise. Default
- * impl to assume the type implements Comparable and
- * invoke .compareTo; be sure to override this method if
- * your FieldComparator's type isn't a Comparable or
- * if your values may sometimes be null */
+ public abstract LeafFieldComparator getLeafComparator(LeafReaderContext context)
+ throws IOException;
+
+ /**
+ * Returns a negative integer if first is less than second, 0 if they are equal and a positive
+ * integer otherwise. Default impl to assume the type implements Comparable and invoke .compareTo;
+ * be sure to override this method if your FieldComparator's type isn't a Comparable or if your
+ * values may sometimes be null
+ */
@SuppressWarnings("unchecked")
public int compareValues(T first, T second) {
if (first == null) {
@@ -136,30 +118,27 @@ public abstract class FieldComparator<T> {
}
/**
- * Informs the comparator that sort is done on this single field.
- * This is useful to enable some optimizations for skipping non-competitive documents.
+ * Informs the comparator that sort is done on this single field. This is useful to enable some
+ * optimizations for skipping non-competitive documents.
*/
- public void setSingleSort() {
- }
+ public void setSingleSort() {}
/**
- * Informs the comparator that the skipping of documents should be disabled.
- * This function is called by TopFieldCollector in cases when the skipping functionality
- * should not be applied or not necessary. An example could be when
- * search sort is a part of the index sort, and can be already efficiently
- * handled by TopFieldCollector, and doing extra work for skipping in the comparator
- * is redundant.
+ * Informs the comparator that the skipping of documents should be disabled. This function is
+ * called by TopFieldCollector in cases when the skipping functionality should not be applied or
+ * not necessary. An example could be when search sort is a part of the index sort, and can be
+ * already efficiently handled by TopFieldCollector, and doing extra work for skipping in the
+ * comparator is redundant.
*/
- public void disableSkipping() {
- }
+ public void disableSkipping() {}
- /** Sorts by descending relevance. NOTE: if you are
- * sorting only by descending relevance and then
- * secondarily by ascending docID, performance is faster
- * using {@link TopScoreDocCollector} directly (which {@link
- * IndexSearcher#search} uses when no {@link Sort} is
- * specified). */
- public static final class RelevanceComparator extends FieldComparator<Float> implements LeafFieldComparator {
+ /**
+ * Sorts by descending relevance. NOTE: if you are sorting only by descending relevance and then
+ * secondarily by ascending docID, performance is faster using {@link TopScoreDocCollector}
+ * directly (which {@link IndexSearcher#search} uses when no {@link Sort} is specified).
+ */
+ public static final class RelevanceComparator extends FieldComparator<Float>
+ implements LeafFieldComparator {
private final float[] scores;
private float bottom;
private Scorable scorer;
@@ -192,7 +171,7 @@ public abstract class FieldComparator<T> {
public LeafFieldComparator getLeafComparator(LeafReaderContext context) {
return this;
}
-
+
@Override
public void setBottom(final int bottom) {
this.bottom = scores[bottom];
@@ -214,7 +193,7 @@ public abstract class FieldComparator<T> {
this.scorer = scorer;
}
}
-
+
@Override
public Float value(int slot) {
return Float.valueOf(scores[slot]);
@@ -235,71 +214,72 @@ public abstract class FieldComparator<T> {
return Float.compare(docValue, topValue);
}
}
-
- /** Sorts by field's natural Term sort order, using
- * ordinals. This is functionally equivalent to {@link
- * org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the string
- * to their relative ordinal positions (using the index
- * returned by {@link org.apache.lucene.index.LeafReader#getSortedDocValues(String)}), and
- * does most comparisons using the ordinals. For medium
- * to large results, this comparator will be much faster
- * than {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small
- * result sets it may be slower. */
- public static class TermOrdValComparator extends FieldComparator<BytesRef> implements LeafFieldComparator {
+
+ /**
+ * Sorts by field's natural Term sort order, using ordinals. This is functionally equivalent to
+ * {@link org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the
+ * string to their relative ordinal positions (using the index returned by {@link
+ * org.apache.lucene.index.LeafReader#getSortedDocValues(String)}), and does most comparisons
+ * using the ordinals. For medium to large results, this comparator will be much faster than
+ * {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small result sets
+ * it may be slower.
+ */
+ public static class TermOrdValComparator extends FieldComparator<BytesRef>
+ implements LeafFieldComparator {
/* Ords for each slot.
- @lucene.internal */
+ @lucene.internal */
final int[] ords;
/* Values for each slot.
- @lucene.internal */
+ @lucene.internal */
final BytesRef[] values;
private final BytesRefBuilder[] tempBRs;
/* Which reader last copied a value into the slot. When
- we compare two slots, we just compare-by-ord if the
- readerGen is the same; else we must compare the
- values (slower).
- @lucene.internal */
+ we compare two slots, we just compare-by-ord if the
+ readerGen is the same; else we must compare the
+ values (slower).
+ @lucene.internal */
final int[] readerGen;
/* Gen of current reader we are on.
- @lucene.internal */
+ @lucene.internal */
int currentReaderGen = -1;
/* Current reader's doc ord/values.
- @lucene.internal */
+ @lucene.internal */
SortedDocValues termsIndex;
private final String field;
/* Bottom slot, or -1 if queue isn't full yet
- @lucene.internal */
+ @lucene.internal */
int bottomSlot = -1;
/* Bottom ord (same as ords[bottomSlot] once bottomSlot
- is set). Cached for faster compares.
- @lucene.internal */
+ is set). Cached for faster compares.
+ @lucene.internal */
int bottomOrd;
/* True if current bottom slot matches the current
- reader.
- @lucene.internal */
+ reader.
+ @lucene.internal */
boolean bottomSameReader;
/* Bottom value (same as values[bottomSlot] once
- bottomSlot is set). Cached for faster compares.
- @lucene.internal */
+ bottomSlot is set). Cached for faster compares.
+ @lucene.internal */
BytesRef bottomValue;
/** Set by setTopValue. */
BytesRef topValue;
+
boolean topSameReader;
int topOrd;
- /** -1 if missing values are sorted first, 1 if they are
- * sorted last */
+ /** -1 if missing values are sorted first, 1 if they are sorted last */
final int missingSortCmp;
-
+
/** Which ordinal to use for a missing value. */
final int missingOrd;
@@ -308,9 +288,10 @@ public abstract class FieldComparator<T> {
this(numHits, field, false);
}
- /** Creates this, with control over how missing values
- * are sorted. Pass sortMissingLast=true to put
- * missing values at the end. */
+ /**
+ * Creates this, with control over how missing values are sorted. Pass sortMissingLast=true to
+ * put missing values at the end.
+ */
public TermOrdValComparator(int numHits, String field, boolean sortMissingLast) {
ords = new int[numHits];
values = new BytesRef[numHits];
@@ -390,12 +371,13 @@ public abstract class FieldComparator<T> {
ords[slot] = ord;
readerGen[slot] = currentReaderGen;
}
-
+
/** Retrieves the SortedDocValues for the field in this segment */
- protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException {
+ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field)
+ throws IOException {
return DocValues.getSorted(context.reader(), field);
}
-
+
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
termsIndex = getSortedDocValues(context, field);
@@ -409,13 +391,14 @@ public abstract class FieldComparator<T> {
topOrd = ord;
} else {
topSameReader = false;
- topOrd = -ord-2;
+ topOrd = -ord - 2;
}
} else {
topOrd = missingOrd;
topSameReader = true;
}
- //System.out.println(" getLeafComparator topOrd=" + topOrd + " topSameReader=" + topSameReader);
+ // System.out.println(" getLeafComparator topOrd=" + topOrd + " topSameReader=" +
+ // topSameReader);
if (bottomSlot != -1) {
// Recompute bottomOrd/SameReader
@@ -424,7 +407,7 @@ public abstract class FieldComparator<T> {
return this;
}
-
+
@Override
public void setBottom(final int bottom) throws IOException {
bottomSlot = bottom;
@@ -449,7 +432,7 @@ public abstract class FieldComparator<T> {
bottomOrd = ord;
// exact value match
bottomSameReader = true;
- readerGen[bottomSlot] = currentReaderGen;
+ readerGen[bottomSlot] = currentReaderGen;
ords[bottomSlot] = bottomOrd;
}
}
@@ -461,7 +444,7 @@ public abstract class FieldComparator<T> {
// null is fine: it means the last doc of the prior
// search was missing this value
topValue = value;
- //System.out.println("setTopValue " + topValue);
+ // System.out.println("setTopValue " + topValue);
}
@Override
@@ -480,7 +463,7 @@ public abstract class FieldComparator<T> {
if (topSameReader) {
// ord is precisely comparable, even in the equal
// case
- //System.out.println("compareTop doc=" + doc + " ord=" + ord + " ret=" + (topOrd-ord));
+ // System.out.println("compareTop doc=" + doc + " ord=" + ord + " ret=" + (topOrd-ord));
return topOrd - ord;
} else if (ord <= topOrd) {
// the equals case always means doc is < value
@@ -507,13 +490,15 @@ public abstract class FieldComparator<T> {
@Override
public void setScorer(Scorable scorer) {}
}
-
- /** Sorts by field's natural Term sort order. All
- * comparisons are done using BytesRef.compareTo, which is
- * slow for medium to large result sets but possibly
- * very fast for very small results sets. */
- public static class TermValComparator extends FieldComparator<BytesRef> implements LeafFieldComparator {
-
+
+ /**
+ * Sorts by field's natural Term sort order. All comparisons are done using BytesRef.compareTo,
+ * which is slow for medium to large result sets but possibly very fast for very small results
+ * sets.
+ */
+ public static class TermValComparator extends FieldComparator<BytesRef>
+ implements LeafFieldComparator {
+
private final BytesRef[] values;
private final BytesRefBuilder[] tempBRs;
private BinaryDocValues docTerms;
@@ -566,7 +551,8 @@ public abstract class FieldComparator<T> {
}
/** Retrieves the BinaryDocValues for the field in this segment */
- protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String field) throws IOException {
+ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String field)
+ throws IOException {
return DocValues.getBinary(context.reader(), field);
}
@@ -575,7 +561,7 @@ public abstract class FieldComparator<T> {
docTerms = getBinaryDocValues(context, field);
return this;
}
-
+
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java b/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
index e7db0ba..1539926 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldComparatorSource.java
@@ -16,22 +16,19 @@
*/
package org.apache.lucene.search;
-
/**
* Provides a {@link FieldComparator} for custom field sorting.
*
* @lucene.experimental
- *
*/
public abstract class FieldComparatorSource {
/**
* Creates a comparator for the field in the given index.
- *
- * @param fieldname
- * Name of the field to create comparator for.
+ *
+ * @param fieldname Name of the field to create comparator for.
* @return FieldComparator.
*/
- public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed);
-
+ public abstract FieldComparator<?> newComparator(
+ String fieldname, int numHits, int sortPos, boolean reversed);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldDoc.java b/lucene/core/src/java/org/apache/lucene/search/FieldDoc.java
index 6125404..111db38 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldDoc.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldDoc.java
@@ -16,35 +16,30 @@
*/
package org.apache.lucene.search;
-
import java.util.Arrays;
/**
- * Expert: A ScoreDoc which also contains information about
- * how to sort the referenced document. In addition to the
- * document number and score, this object contains an array
- * of values for the document from the field(s) used to sort.
- * For example, if the sort criteria was to sort by fields
- * "a", "b" then "c", the <code>fields</code> object array
- * will have three elements, corresponding respectively to
- * the term values for the document in fields "a", "b" and "c".
- * The class of each element in the array will be either
- * Integer, Float or String depending on the type of values
- * in the terms of each field.
+ * Expert: A ScoreDoc which also contains information about how to sort the referenced document. In
+ * addition to the document number and score, this object contains an array of values for the
+ * document from the field(s) used to sort. For example, if the sort criteria was to sort by fields
+ * "a", "b" then "c", the <code>fields</code> object array will have three elements, corresponding
+ * respectively to the term values for the document in fields "a", "b" and "c". The class of each
+ * element in the array will be either Integer, Float or String depending on the type of values in
+ * the terms of each field.
*
* <p>Created: Feb 11, 2004 1:23:38 PM
*
- * @since lucene 1.4
+ * @since lucene 1.4
* @see ScoreDoc
* @see TopFieldDocs
*/
public class FieldDoc extends ScoreDoc {
- /** Expert: The values which are used to sort the referenced document.
- * The order of these will match the original sort criteria given by a
- * Sort object. Each Object will have been returned from
- * the <code>value</code> method corresponding
- * FieldComparator used to sort this field.
+ /**
+ * Expert: The values which are used to sort the referenced document. The order of these will
+ * match the original sort criteria given by a Sort object. Each Object will have been returned
+ * from the <code>value</code> method corresponding FieldComparator used to sort this field.
+ *
* @see Sort
* @see IndexSearcher#search(Query,int,Sort)
*/
@@ -60,13 +55,13 @@ public class FieldDoc extends ScoreDoc {
super(doc, score);
this.fields = fields;
}
-
+
/** Expert: Creates one of these objects with the given sort information. */
public FieldDoc(int doc, float score, Object[] fields, int shardIndex) {
super(doc, score, shardIndex);
this.fields = fields;
}
-
+
// A convenience method for debugging.
@Override
public String toString() {
diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index a790636..b445403 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -16,25 +16,21 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.PriorityQueue;
/**
* Expert: A hit queue for sorting by hits by terms in more than one field.
- *
+ *
* @lucene.experimental
* @since 2.9
* @see IndexSearcher#search(Query,int,Sort)
*/
-public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends PriorityQueue<T> {
+public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry>
+ extends PriorityQueue<T> {
- /**
- * Extension of ScoreDoc to also store the
- * {@link FieldComparator} slot.
- */
+ /** Extension of ScoreDoc to also store the {@link FieldComparator} slot. */
public static class Entry extends ScoreDoc {
public int slot;
@@ -42,7 +38,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
super(doc, Float.NaN);
this.slot = slot;
}
-
+
@Override
public String toString() {
return "slot:" + slot + " " + super.toString();
@@ -50,14 +46,15 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
}
/**
- * An implementation of {@link FieldValueHitQueue} which is optimized in case
- * there is just one comparator.
+ * An implementation of {@link FieldValueHitQueue} which is optimized in case there is just one
+ * comparator.
*/
- private static final class OneComparatorFieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends FieldValueHitQueue<T> {
-
+ private static final class OneComparatorFieldValueHitQueue<T extends FieldValueHitQueue.Entry>
+ extends FieldValueHitQueue<T> {
+
private final int oneReverseMul;
private final FieldComparator<?> oneComparator;
-
+
public OneComparatorFieldValueHitQueue(SortField[] fields, int size) {
super(fields, size);
@@ -68,9 +65,11 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
/**
* Returns whether <code>hitA</code> is less relevant than <code>hitB</code>.
+ *
* @param hitA Entry
* @param hitB Entry
- * @return <code>true</code> if document <code>hitA</code> should be sorted after document <code>hitB</code>.
+ * @return <code>true</code> if document <code>hitA</code> should be sorted after document
+ * <code>hitB</code>.
*/
@Override
protected boolean lessThan(final Entry hitA, final Entry hitB) {
@@ -86,19 +85,19 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
// avoid random sort order that could lead to duplicates (bug #31241):
return hitA.doc > hitB.doc;
}
-
}
-
+
/**
- * An implementation of {@link FieldValueHitQueue} which is optimized in case
- * there is more than one comparator.
+ * An implementation of {@link FieldValueHitQueue} which is optimized in case there is more than
+ * one comparator.
*/
- private static final class MultiComparatorsFieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends FieldValueHitQueue<T> {
+ private static final class MultiComparatorsFieldValueHitQueue<T extends FieldValueHitQueue.Entry>
+ extends FieldValueHitQueue<T> {
public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size) {
super(fields, size);
}
-
+
@Override
protected boolean lessThan(final Entry hitA, final Entry hitB) {
@@ -117,7 +116,6 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
// avoid random sort order that could lead to duplicates (bug #31241):
return hitA.doc > hitB.doc;
}
-
}
// prevent instantiation and extension.
@@ -125,7 +123,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
super(size);
// When we get here, fields.length is guaranteed to be > 0, therefore no
// need to check it again.
-
+
// All these are required by this class's API - need to return arrays.
// Therefore even in the case of a single comparator, create an array
// anyway.
@@ -147,17 +145,16 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
/**
* Creates a hit queue sorted by the given list of fields.
- *
- * <p><b>NOTE</b>: The instances returned by this method
- * pre-allocate a full array of length <code>numHits</code>.
- *
- * @param fields
- * SortField array we are sorting by in priority order (highest
- * priority first); cannot be <code>null</code> or empty
- * @param size
- * The number of hits to retain. Must be greater than zero.
+ *
+ * <p><b>NOTE</b>: The instances returned by this method pre-allocate a full array of length
+ * <code>numHits</code>.
+ *
+ * @param fields SortField array we are sorting by in priority order (highest priority first);
+ * cannot be <code>null</code> or empty
+ * @param size The number of hits to retain. Must be greater than zero.
*/
- public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(SortField[] fields, int size) {
+ public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(
+ SortField[] fields, int size) {
if (fields.length == 0) {
throw new IllegalArgumentException("Sort must contain at least one field");
@@ -169,7 +166,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
return new MultiComparatorsFieldValueHitQueue<>(fields, size);
}
}
-
+
public FieldComparator<?>[] getComparators() {
return comparators;
}
@@ -188,19 +185,19 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
/** Stores the sort criteria being used. */
protected final SortField[] fields;
+
protected final FieldComparator<?>[] comparators;
protected final int[] reverseMul;
@Override
- protected abstract boolean lessThan (final Entry a, final Entry b);
+ protected abstract boolean lessThan(final Entry a, final Entry b);
/**
- * Given a queue Entry, creates a corresponding FieldDoc
- * that contains the values used to sort the given document.
- * These values are not the raw values out of the index, but the internal
- * representation of them. This is so the given search hit can be collated by
- * a MultiSearcher with other search hits.
- *
+ * Given a queue Entry, creates a corresponding FieldDoc that contains the values used to sort the
+ * given document. These values are not the raw values out of the index, but the internal
+ * representation of them. This is so the given search hit can be collated by a MultiSearcher with
+ * other search hits.
+ *
* @param entry The Entry used to create a FieldDoc
* @return The newly created FieldDoc
* @see IndexSearcher#search(Query,int,Sort)
@@ -211,7 +208,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
for (int i = 0; i < n; ++i) {
fields[i] = comparators[i].value(entry.slot);
}
- //if (maxscore > 1.0f) doc.score /= maxscore; // normalize scores
+ // if (maxscore > 1.0f) doc.score /= maxscore; // normalize scores
return new FieldDoc(entry.doc, entry.score, fields);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java b/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java
index f7ff0ce..9f57a57 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterCollector.java
@@ -17,10 +17,8 @@
package org.apache.lucene.search;
import java.io.IOException;
-
import org.apache.lucene.index.LeafReaderContext;
-
/**
* {@link Collector} delegator.
*
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java
index 5adecb9..2473366 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterLeafCollector.java
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
@@ -52,5 +51,4 @@ public abstract class FilterLeafCollector implements LeafCollector {
}
return name + "(" + in + ")";
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterMatchesIterator.java b/lucene/core/src/java/org/apache/lucene/search/FilterMatchesIterator.java
index 214cf61..3392fdf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterMatchesIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterMatchesIterator.java
@@ -19,18 +19,15 @@ package org.apache.lucene.search;
import java.io.IOException;
-/**
- * A MatchesIterator that delegates all calls to another MatchesIterator
- */
+/** A MatchesIterator that delegates all calls to another MatchesIterator */
public abstract class FilterMatchesIterator implements MatchesIterator {
- /**
- * The delegate
- */
+ /** The delegate */
protected final MatchesIterator in;
/**
* Create a new FilterMatchesIterator
+ *
* @param in the delegate
*/
protected FilterMatchesIterator(MatchesIterator in) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterScorable.java b/lucene/core/src/java/org/apache/lucene/search/FilterScorable.java
index c2e68c1..d9e0e29 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterScorable.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterScorable.java
@@ -22,12 +22,10 @@ import java.util.Collection;
import java.util.Collections;
/**
- * Filter a {@link Scorable}, intercepting methods and optionally changing
- * their return values
+ * Filter a {@link Scorable}, intercepting methods and optionally changing their return values
*
- * The default implementation simply passes all calls to its delegate, with
- * the exception of {@link #setMinCompetitiveScore(float)} which defaults
- * to a no-op.
+ * <p>The default implementation simply passes all calls to its delegate, with the exception of
+ * {@link #setMinCompetitiveScore(float)} which defaults to a no-op.
*/
public class FilterScorable extends Scorable {
@@ -35,7 +33,8 @@ public class FilterScorable extends Scorable {
/**
* Filter a scorer
- * @param in the scorer to filter
+ *
+ * @param in the scorer to filter
*/
public FilterScorable(Scorable in) {
this.in = in;
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
index 7bcb1ce..5d27cc9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
@@ -16,24 +16,21 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
-/**
- * A {@code FilterScorer} contains another {@code Scorer}, which it
- * uses as its basic source of data, possibly transforming the data along the
- * way or providing additional functionality. The class
- * {@code FilterScorer} itself simply implements all abstract methods
- * of {@code Scorer} with versions that pass all requests to the
- * contained scorer. Subclasses of {@code FilterScorer} may
- * further override some of these methods and may also provide additional
- * methods and fields.
+/**
+ * A {@code FilterScorer} contains another {@code Scorer}, which it uses as its basic source of
+ * data, possibly transforming the data along the way or providing additional functionality. The
+ * class {@code FilterScorer} itself simply implements all abstract methods of {@code Scorer} with
+ * versions that pass all requests to the contained scorer. Subclasses of {@code FilterScorer} may
+ * further override some of these methods and may also provide additional methods and fields.
*/
public abstract class FilterScorer extends Scorer {
protected final Scorer in;
/**
* Create a new FilterScorer
+ *
* @param in the {@link Scorer} to wrap
*/
public FilterScorer(Scorer in) {
@@ -43,6 +40,7 @@ public abstract class FilterScorer extends Scorer {
/**
* Create a new FilterScorer with a specific weight
+ *
* @param in the {@link Scorer} to wrap
* @param weight a {@link Weight}
*/
@@ -53,7 +51,7 @@ public abstract class FilterScorer extends Scorer {
}
this.in = in;
}
-
+
@Override
public float score() throws IOException {
return in.score();
@@ -71,7 +69,7 @@ public abstract class FilterScorer extends Scorer {
public final DocIdSetIterator iterator() {
return in.iterator();
}
-
+
@Override
public final TwoPhaseIterator twoPhaseIterator() {
return in.twoPhaseIterator();
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterWeight.java b/lucene/core/src/java/org/apache/lucene/search/FilterWeight.java
index 5292e78..e9f2bbe 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterWeight.java
@@ -17,36 +17,31 @@
package org.apache.lucene.search;
import java.io.IOException;
-
import org.apache.lucene.index.LeafReaderContext;
/**
- * A {@code FilterWeight} contains another {@code Weight} and implements
- * all abstract methods by calling the contained weight's method.
+ * A {@code FilterWeight} contains another {@code Weight} and implements all abstract methods by
+ * calling the contained weight's method.
*
- * Note that {@code FilterWeight} does not override the non-abstract
- * {@link Weight#bulkScorer(LeafReaderContext)} method and subclasses of
- * {@code FilterWeight} must provide their bulkScorer implementation
- * if required.
+ * <p>Note that {@code FilterWeight} does not override the non-abstract {@link
+ * Weight#bulkScorer(LeafReaderContext)} method and subclasses of {@code FilterWeight} must provide
+ * their bulkScorer implementation if required.
*
* @lucene.internal
*/
public abstract class FilterWeight extends Weight {
- final protected Weight in;
+ protected final Weight in;
- /**
- * Default constructor.
- */
+ /** Default constructor. */
protected FilterWeight(Weight weight) {
this(weight.getQuery(), weight);
}
/**
- * Alternative constructor.
- * Use this variant only if the <code>weight</code> was not obtained
- * via the {@link Query#createWeight(IndexSearcher, ScoreMode, float)}
- * method of the <code>query</code> object.
+ * Alternative constructor. Use this variant only if the <code>weight</code> was not obtained via
+ * the {@link Query#createWeight(IndexSearcher, ScoreMode, float)} method of the <code>query
+ * </code> object.
*/
protected FilterWeight(Query query, Weight weight) {
super(query);
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java b/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
index bd88a08..723b0a2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
@@ -16,13 +16,11 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
- * Abstract decorator class of a DocIdSetIterator
- * implementation that provides on-demand filter/validation
- * mechanism on an underlying DocIdSetIterator.
+ * Abstract decorator class of a DocIdSetIterator implementation that provides on-demand
+ * filter/validation mechanism on an underlying DocIdSetIterator.
*/
public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
protected DocIdSetIterator _innerIter;
@@ -30,6 +28,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
/**
* Constructor.
+ *
* @param innerIter Underlying DocIdSetIterator.
*/
public FilteredDocIdSetIterator(DocIdSetIterator innerIter) {
@@ -47,6 +46,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
/**
* Validation method to determine whether a docid should be in the result set.
+ *
* @param doc docid to be tested
* @return true if input docid should be in the result set, false otherwise.
* @see #FilteredDocIdSetIterator(DocIdSetIterator)
@@ -57,7 +57,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
public int docID() {
return doc;
}
-
+
@Override
public int nextDoc() throws IOException {
while ((doc = _innerIter.nextDoc()) != NO_MORE_DOCS) {
@@ -67,7 +67,7 @@ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
}
return doc;
}
-
+
@Override
public int advance(int target) throws IOException {
doc = _innerIter.advance(target);
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyAutomatonBuilder.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyAutomatonBuilder.java
index 42e4b07..1fedbf7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyAutomatonBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyAutomatonBuilder.java
@@ -23,9 +23,8 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata;
import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
/**
- * Builds a set of CompiledAutomaton for fuzzy matching on a given term,
- * with specified maximum edit distance, fixed prefix and whether or not
- * to allow transpositions.
+ * Builds a set of CompiledAutomaton for fuzzy matching on a given term, with specified maximum edit
+ * distance, fixed prefix and whether or not to allow transpositions.
*/
class FuzzyAutomatonBuilder {
@@ -37,7 +36,11 @@ class FuzzyAutomatonBuilder {
FuzzyAutomatonBuilder(String term, int maxEdits, int prefixLength, boolean transpositions) {
if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
- throw new IllegalArgumentException("max edits must be 0.." + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + ", inclusive; got: " + maxEdits);
+ throw new IllegalArgumentException(
+ "max edits must be 0.."
+ + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE
+ + ", inclusive; got: "
+ + maxEdits);
}
if (prefixLength < 0) {
throw new IllegalArgumentException("prefixLength cannot be less than 0");
@@ -58,8 +61,7 @@ class FuzzyAutomatonBuilder {
for (int i = 0; i <= maxEdits; i++) {
try {
compiled[i] = new CompiledAutomaton(levBuilder.toAutomaton(i, prefix), true, false);
- }
- catch (TooComplexToDeterminizeException e) {
+ } catch (TooComplexToDeterminizeException e) {
throw new FuzzyTermsEnum.FuzzyTermsException(term, e);
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
index e1359ab..f0e9e28 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
@@ -16,10 +16,8 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.Objects;
-
import org.apache.lucene.index.SingleTermsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
@@ -28,64 +26,64 @@ import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
-/** Implements the fuzzy search query. The similarity measurement
- * is based on the Damerau-Levenshtein (optimal string alignment) algorithm,
- * though you can explicitly choose classic Levenshtein by passing <code>false</code>
- * to the <code>transpositions</code> parameter.
- *
- * <p>This query uses {@link MultiTermQuery.TopTermsBlendedFreqScoringRewrite}
- * as default. So terms will be collected and scored according to their
- * edit distance. Only the top terms are used for building the {@link BooleanQuery}.
- * It is not recommended to change the rewrite mode for fuzzy queries.
- *
- * <p>At most, this query will match terms up to
- * {@value org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} edits.
- * Higher distances (especially with transpositions enabled), are generally not useful and
- * will match a significant amount of the term dictionary. If you really want this, consider
- * using an n-gram indexing technique (such as the SpellChecker in the
- * <a href="{@docRoot}/../suggest/overview-summary.html">suggest module</a>) instead.
+/**
+ * Implements the fuzzy search query. The similarity measurement is based on the Damerau-Levenshtein
+ * (optimal string alignment) algorithm, though you can explicitly choose classic Levenshtein by
+ * passing <code>false</code> to the <code>transpositions</code> parameter.
+ *
+ * <p>This query uses {@link MultiTermQuery.TopTermsBlendedFreqScoringRewrite} as default. So terms
+ * will be collected and scored according to their edit distance. Only the top terms are used for
+ * building the {@link BooleanQuery}. It is not recommended to change the rewrite mode for fuzzy
+ * queries.
+ *
+ * <p>At most, this query will match terms up to {@value
+ * org.apache.lucene.util.automaton.LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE} edits. Higher
+ * distances (especially with transpositions enabled), are generally not useful and will match a
+ * significant amount of the term dictionary. If you really want this, consider using an n-gram
+ * indexing technique (such as the SpellChecker in the <a
+ * href="{@docRoot}/../suggest/overview-summary.html">suggest module</a>) instead.
*
- * <p>NOTE: terms of length 1 or 2 will sometimes not match because of how the scaled
- * distance between two terms is computed. For a term to match, the edit distance between
- * the terms must be less than the minimum length term (either the input term, or
- * the candidate term). For example, FuzzyQuery on term "abcd" with maxEdits=2 will
- * not match an indexed term "ab", and FuzzyQuery on term "a" with maxEdits=2 will not
- * match an indexed term "abc".
+ * <p>NOTE: terms of length 1 or 2 will sometimes not match because of how the scaled distance
+ * between two terms is computed. For a term to match, the edit distance between the terms must be
+ * less than the minimum length term (either the input term, or the candidate term). For example,
+ * FuzzyQuery on term "abcd" with maxEdits=2 will not match an indexed term "ab", and FuzzyQuery on
+ * term "a" with maxEdits=2 will not match an indexed term "abc".
*/
public class FuzzyQuery extends MultiTermQuery {
-
- public final static int defaultMaxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE;
- public final static int defaultPrefixLength = 0;
- public final static int defaultMaxExpansions = 50;
- public final static boolean defaultTranspositions = true;
-
+
+ public static final int defaultMaxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE;
+ public static final int defaultPrefixLength = 0;
+ public static final int defaultMaxExpansions = 50;
+ public static final boolean defaultTranspositions = true;
+
private final int maxEdits;
private final int maxExpansions;
private final boolean transpositions;
private final int prefixLength;
private final Term term;
-
+
/**
- * Create a new FuzzyQuery that will match terms with an edit distance
- * of at most <code>maxEdits</code> to <code>term</code>.
- * If a <code>prefixLength</code> > 0 is specified, a common prefix
- * of that length is also required.
- *
+ * Create a new FuzzyQuery that will match terms with an edit distance of at most <code>maxEdits
+ * </code> to <code>term</code>. If a <code>prefixLength</code> > 0 is specified, a common
+ * prefix of that length is also required.
+ *
* @param term the term to search for
- * @param maxEdits must be {@code >= 0} and {@code <=} {@link LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}.
+ * @param maxEdits must be {@code >= 0} and {@code <=} {@link
+ * LevenshteinAutomata#MAXIMUM_SUPPORTED_DISTANCE}.
* @param prefixLength length of common (non-fuzzy) prefix
- * @param maxExpansions the maximum number of terms to match. If this number is
- * greater than {@link IndexSearcher#getMaxClauseCount} when the query is rewritten,
- * then the maxClauseCount will be used instead.
- * @param transpositions true if transpositions should be treated as a primitive
- * edit operation. If this is false, comparisons will implement the classic
- * Levenshtein algorithm.
+ * @param maxExpansions the maximum number of terms to match. If this number is greater than
+ * {@link IndexSearcher#getMaxClauseCount} when the query is rewritten, then the
+ * maxClauseCount will be used instead.
+ * @param transpositions true if transpositions should be treated as a primitive edit operation.
+ * If this is false, comparisons will implement the classic Levenshtein algorithm.
*/
- public FuzzyQuery(Term term, int maxEdits, int prefixLength, int maxExpansions, boolean transpositions) {
+ public FuzzyQuery(
+ Term term, int maxEdits, int prefixLength, int maxExpansions, boolean transpositions) {
super(term.field());
-
+
if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
- throw new IllegalArgumentException("maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
+ throw new IllegalArgumentException(
+ "maxEdits must be between 0 and " + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
}
if (prefixLength < 0) {
throw new IllegalArgumentException("prefixLength cannot be negative.");
@@ -93,7 +91,7 @@ public class FuzzyQuery extends MultiTermQuery {
if (maxExpansions <= 0) {
throw new IllegalArgumentException("maxExpansions must be positive.");
}
-
+
this.term = term;
this.maxEdits = maxEdits;
this.prefixLength = prefixLength;
@@ -101,58 +99,50 @@ public class FuzzyQuery extends MultiTermQuery {
this.maxExpansions = maxExpansions;
setRewriteMethod(new MultiTermQuery.TopTermsBlendedFreqScoringRewrite(maxExpansions));
}
-
+
/**
- * Calls {@link #FuzzyQuery(Term, int, int, int, boolean)
- * FuzzyQuery(term, maxEdits, prefixLength, defaultMaxExpansions, defaultTranspositions)}.
+ * Calls {@link #FuzzyQuery(Term, int, int, int, boolean) FuzzyQuery(term, maxEdits, prefixLength,
+ * defaultMaxExpansions, defaultTranspositions)}.
*/
public FuzzyQuery(Term term, int maxEdits, int prefixLength) {
this(term, maxEdits, prefixLength, defaultMaxExpansions, defaultTranspositions);
}
-
- /**
- * Calls {@link #FuzzyQuery(Term, int, int) FuzzyQuery(term, maxEdits, defaultPrefixLength)}.
- */
+
+ /** Calls {@link #FuzzyQuery(Term, int, int) FuzzyQuery(term, maxEdits, defaultPrefixLength)}. */
public FuzzyQuery(Term term, int maxEdits) {
this(term, maxEdits, defaultPrefixLength);
}
- /**
- * Calls {@link #FuzzyQuery(Term, int) FuzzyQuery(term, defaultMaxEdits)}.
- */
+ /** Calls {@link #FuzzyQuery(Term, int) FuzzyQuery(term, defaultMaxEdits)}. */
public FuzzyQuery(Term term) {
this(term, defaultMaxEdits);
}
-
- /**
- * @return the maximum number of edit distances allowed for this query to match.
- */
+
+ /** @return the maximum number of edit distances allowed for this query to match. */
public int getMaxEdits() {
return maxEdits;
}
-
+
/**
- * Returns the non-fuzzy prefix length. This is the number of characters at the start
- * of a term that must be identical (not fuzzy) to the query term if the query
- * is to match that term.
+ * Returns the non-fuzzy prefix length. This is the number of characters at the start of a term
+ * that must be identical (not fuzzy) to the query term if the query is to match that term.
*/
public int getPrefixLength() {
return prefixLength;
}
-
+
/**
- * Returns true if transpositions should be treated as a primitive edit operation.
- * If this is false, comparisons will implement the classic Levenshtein algorithm.
+ * Returns true if transpositions should be treated as a primitive edit operation. If this is
+ * false, comparisons will implement the classic Levenshtein algorithm.
*/
public boolean getTranspositions() {
return transpositions;
}
- /**
- * Returns the compiled automata used to match terms
- */
+ /** Returns the compiled automata used to match terms */
public CompiledAutomaton getAutomata() {
- FuzzyAutomatonBuilder builder = new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions);
+ FuzzyAutomatonBuilder builder =
+ new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions);
return builder.buildMaxEditAutomaton();
}
@@ -171,19 +161,17 @@ public class FuzzyQuery extends MultiTermQuery {
return new FuzzyTermsEnum(terms, atts, getTerm(), maxEdits, prefixLength, transpositions);
}
- /**
- * Returns the pattern term.
- */
+ /** Returns the pattern term. */
public Term getTerm() {
return term;
}
-
+
@Override
public String toString(String field) {
final StringBuilder buffer = new StringBuilder();
if (!term.field().equals(field)) {
- buffer.append(term.field());
- buffer.append(":");
+ buffer.append(term.field());
+ buffer.append(":");
}
buffer.append(term.text());
buffer.append('~');
@@ -205,22 +193,20 @@ public class FuzzyQuery extends MultiTermQuery {
@Override
public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (!super.equals(obj))
- return false;
- if (getClass() != obj.getClass())
- return false;
+ if (this == obj) return true;
+ if (!super.equals(obj)) return false;
+ if (getClass() != obj.getClass()) return false;
FuzzyQuery other = (FuzzyQuery) obj;
- return Objects.equals(maxEdits, other.maxEdits) && Objects.equals(prefixLength, other.prefixLength)
- && Objects.equals(maxExpansions, other.maxExpansions) && Objects.equals(transpositions, other.transpositions)
+ return Objects.equals(maxEdits, other.maxEdits)
+ && Objects.equals(prefixLength, other.prefixLength)
+ && Objects.equals(maxExpansions, other.maxExpansions)
+ && Objects.equals(transpositions, other.transpositions)
&& Objects.equals(term, other.term);
}
/**
- * Helper function to convert from "minimumSimilarity" fractions
- * to raw edit distances.
- *
+ * Helper function to convert from "minimumSimilarity" fractions to raw edit distances.
+ *
* @param minimumSimilarity scaled similarity
* @param termLen length (in unicode codepoints) of the term.
* @return equivalent number of maxEdits
@@ -231,9 +217,9 @@ public class FuzzyQuery extends MultiTermQuery {
} else if (minimumSimilarity == 0.0f) {
return 0; // 0 means exact, not infinite # of edits!
} else {
- return Math.min((int) ((1D-minimumSimilarity) * termLen),
- LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
+ return Math.min(
+ (int) ((1D - minimumSimilarity) * termLen),
+ LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 4c49d8a..cfd6ed2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -16,10 +16,8 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.function.Supplier;
-
import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
@@ -35,12 +33,11 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.CompiledAutomaton;
-/** Subclass of TermsEnum for enumerating all terms that are similar
- * to the specified filter term.
+/**
+ * Subclass of TermsEnum for enumerating all terms that are similar to the specified filter term.
*
- * <p>Term enumerations are always ordered by
- * {@link BytesRef#compareTo}. Each term in the enumeration is
- * greater than all that precede it.</p>
+ * <p>Term enumerations are always ordered by {@link BytesRef#compareTo}. Each term in the
+ * enumeration is greater than all that precede it.
*/
public final class FuzzyTermsEnum extends TermsEnum {
@@ -48,38 +45,42 @@ public final class FuzzyTermsEnum extends TermsEnum {
private TermsEnum actualEnum;
private final AttributeSource atts;
-
+
// We use this to communicate the score (boost) of the current matched term we are on back to
- // MultiTermQuery.TopTermsBlendedFreqScoringRewrite that is collecting the best (default 50) matched terms:
+ // MultiTermQuery.TopTermsBlendedFreqScoringRewrite that is collecting the best (default 50)
+ // matched terms:
private final BoostAttribute boostAtt;
- // MultiTermQuery.TopTermsBlendedFreqScoringRewrite tells us the worst boost still in its queue using this att,
- // which we use to know when we can reduce the automaton from ed=2 to ed=1, or ed=0 if only single top term is collected:
+ // MultiTermQuery.TopTermsBlendedFreqScoringRewrite tells us the worst boost still in its queue
+ // using this att,
+ // which we use to know when we can reduce the automaton from ed=2 to ed=1, or ed=0 if only single
+ // top term is collected:
private final MaxNonCompetitiveBoostAttribute maxBoostAtt;
private final CompiledAutomaton[] automata;
private final Terms terms;
private final int termLength;
private final Term term;
-
+
private float bottom;
private BytesRef bottomTerm;
private BytesRef queuedBottom;
-
- // Maximum number of edits we will accept. This is either 2 or 1 (or, degenerately, 0) passed by the user originally,
- // but as we collect terms, we can lower this (e.g. from 2 to 1) if we detect that the term queue is full, and all
+ // Maximum number of edits we will accept. This is either 2 or 1 (or, degenerately, 0) passed by
+ // the user originally,
+ // but as we collect terms, we can lower this (e.g. from 2 to 1) if we detect that the term queue
+ // is full, and all
// collected terms are ed=1:
private int maxEdits;
-
/**
- * Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
- * length <code>prefixLength</code> with <code>term</code> and which have at most {@code maxEdits} edits.
- * <p>
- * After calling the constructor the enumeration is already pointing to the first
- * valid term if such a term exists.
+ * Constructor for enumeration of all terms from specified <code>reader</code> which share a
+ * prefix of length <code>prefixLength</code> with <code>term</code> and which have at most {@code
+ * maxEdits} edits.
+ *
+ * <p>After calling the constructor the enumeration is already pointing to the first valid term if
+ * such a term exists.
*
* @param terms Delivers terms.
* @param term Pattern term.
@@ -88,16 +89,23 @@ public final class FuzzyTermsEnum extends TermsEnum {
* @param transpositions whether transpositions should count as a single edit
* @throws IOException if there is a low-level IO error
*/
- public FuzzyTermsEnum(Terms terms, Term term, int maxEdits, int prefixLength, boolean transpositions) throws IOException {
- this(terms, new AttributeSource(), term, () -> new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions));
+ public FuzzyTermsEnum(
+ Terms terms, Term term, int maxEdits, int prefixLength, boolean transpositions)
+ throws IOException {
+ this(
+ terms,
+ new AttributeSource(),
+ term,
+ () -> new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions));
}
/**
- * Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
- * length <code>prefixLength</code> with <code>term</code> and which have at most {@code maxEdits} edits.
- * <p>
- * After calling the constructor the enumeration is already pointing to the first
- * valid term if such a term exists.
+ * Constructor for enumeration of all terms from specified <code>reader</code> which share a
+ * prefix of length <code>prefixLength</code> with <code>term</code> and which have at most {@code
+ * maxEdits} edits.
+ *
+ * <p>After calling the constructor the enumeration is already pointing to the first valid term if
+ * such a term exists.
*
* @param terms Delivers terms.
* @param atts An AttributeSource used to share automata between segments
@@ -107,11 +115,27 @@ public final class FuzzyTermsEnum extends TermsEnum {
* @param transpositions whether transpositions should count as a single edit
* @throws IOException if there is a low-level IO error
*/
- FuzzyTermsEnum(Terms terms, AttributeSource atts, Term term, int maxEdits, int prefixLength, boolean transpositions) throws IOException {
- this(terms, atts, term, () -> new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions));
+ FuzzyTermsEnum(
+ Terms terms,
+ AttributeSource atts,
+ Term term,
+ int maxEdits,
+ int prefixLength,
+ boolean transpositions)
+ throws IOException {
+ this(
+ terms,
+ atts,
+ term,
+ () -> new FuzzyAutomatonBuilder(term.text(), maxEdits, prefixLength, transpositions));
}
- private FuzzyTermsEnum(Terms terms, AttributeSource atts, Term term, Supplier<FuzzyAutomatonBuilder> automatonBuilder) throws IOException {
+ private FuzzyTermsEnum(
+ Terms terms,
+ AttributeSource atts,
+ Term term,
+ Supplier<FuzzyAutomatonBuilder> automatonBuilder)
+ throws IOException {
this.terms = terms;
this.atts = atts;
@@ -134,24 +158,19 @@ public final class FuzzyTermsEnum extends TermsEnum {
}
/**
- * Sets the maximum non-competitive boost, which may allow switching to a
- * lower max-edit automaton at run time
+ * Sets the maximum non-competitive boost, which may allow switching to a lower max-edit automaton
+ * at run time
*/
public void setMaxNonCompetitiveBoost(float boost) {
this.maxBoostAtt.setMaxNonCompetitiveBoost(boost);
}
- /**
- * Gets the boost of the current term
- */
+ /** Gets the boost of the current term */
public float getBoost() {
return boostAtt.getBoost();
}
-
- /**
- * return an automata-based enum for matching up to editDistance from
- * lastTerm, if possible
- */
+
+ /** return an automata-based enum for matching up to editDistance from lastTerm, if possible */
private TermsEnum getAutomatonEnum(int editDistance, BytesRef lastTerm) throws IOException {
assert editDistance < automata.length;
final CompiledAutomaton compiled = automata[editDistance];
@@ -167,14 +186,16 @@ public final class FuzzyTermsEnum extends TermsEnum {
}
/**
- * fired when the max non-competitive boost has changed. this is the hook to
- * swap in a smarter actualEnum.
+ * fired when the max non-competitive boost has changed. this is the hook to swap in a smarter
+ * actualEnum.
*/
private void bottomChanged(BytesRef lastTerm) throws IOException {
int oldMaxEdits = maxEdits;
-
- // true if the last term encountered is lexicographically equal or after the bottom term in the PQ
- boolean termAfter = bottomTerm == null || (lastTerm != null && lastTerm.compareTo(bottomTerm) >= 0);
+
+ // true if the last term encountered is lexicographically equal or after the bottom term in the
+ // PQ
+ boolean termAfter =
+ bottomTerm == null || (lastTerm != null && lastTerm.compareTo(bottomTerm) >= 0);
// as long as the max non-competitive boost is >= the max boost
// for some edit distance, keep dropping the max edit distance.
@@ -187,13 +208,15 @@ public final class FuzzyTermsEnum extends TermsEnum {
}
if (oldMaxEdits != maxEdits || lastTerm == null) {
- // This is a very powerful optimization: the maximum edit distance has changed. This happens because we collect only the top scoring
- // N (= 50, by default) terms, and if e.g. maxEdits=2, and the queue is now full of matching terms, and we notice that the worst entry
+ // This is a very powerful optimization: the maximum edit distance has changed. This happens
+ // because we collect only the top scoring
+ // N (= 50, by default) terms, and if e.g. maxEdits=2, and the queue is now full of matching
+ // terms, and we notice that the worst entry
// in that queue is ed=1, then we can switch the automata here to ed=1 which is a big speedup.
actualEnum = getAutomatonEnum(maxEdits, lastTerm);
}
}
-
+
@Override
public BytesRef next() throws IOException {
@@ -201,7 +224,6 @@ public final class FuzzyTermsEnum extends TermsEnum {
bottomChanged(queuedBottom);
queuedBottom = null;
}
-
BytesRef term;
@@ -212,7 +234,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
}
int ed = maxEdits;
-
+
// we know the outer DFA always matches.
// now compute exact edit distance
while (ed > 0) {
@@ -222,7 +244,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
break;
}
}
-
+
if (ed == 0) { // exact match
boostAtt.setBoost(1.0F);
} else {
@@ -232,7 +254,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
float similarity = 1.0f - (float) ed / (float) minTermLength;
boostAtt.setBoost(similarity);
}
-
+
final float bottom = maxBoostAtt.getMaxNonCompetitiveBoost();
final BytesRef bottomTerm = maxBoostAtt.getCompetitiveTerm();
if (bottom != this.bottom || bottomTerm != this.bottomTerm) {
@@ -241,18 +263,21 @@ public final class FuzzyTermsEnum extends TermsEnum {
// clone the term before potentially doing something with it
// this is a rare but wonderful occurrence anyway
- // We must delay bottomChanged until the next next() call otherwise we mess up docFreq(), etc., for the current term:
+ // We must delay bottomChanged until the next next() call otherwise we mess up docFreq(),
+ // etc., for the current term:
queuedBottom = BytesRef.deepCopyOf(term);
}
-
+
return term;
}
/** returns true if term is within k edits of the query term */
private boolean matches(BytesRef termIn, int k) {
- return k == 0 ? termIn.equals(term.bytes()) : automata[k].runAutomaton.run(termIn.bytes, termIn.offset, termIn.length);
+ return k == 0
+ ? termIn.equals(term.bytes())
+ : automata[k].runAutomaton.run(termIn.bytes, termIn.offset, termIn.length);
}
-
+
// proxy all other enum calls to the actual enum
@Override
public int docFreq() throws IOException {
@@ -263,27 +288,27 @@ public final class FuzzyTermsEnum extends TermsEnum {
public long totalTermFreq() throws IOException {
return actualEnum.totalTermFreq();
}
-
+
@Override
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
return actualEnum.postings(reuse, flags);
}
-
+
@Override
public ImpactsEnum impacts(int flags) throws IOException {
return actualEnum.impacts(flags);
}
-
+
@Override
public void seekExact(BytesRef term, TermState state) throws IOException {
actualEnum.seekExact(term, state);
}
-
+
@Override
public TermState termState() throws IOException {
return actualEnum.termState();
}
-
+
@Override
public long ord() throws IOException {
return actualEnum.ord();
@@ -303,21 +328,21 @@ public final class FuzzyTermsEnum extends TermsEnum {
public SeekStatus seekCeil(BytesRef text) throws IOException {
return actualEnum.seekCeil(text);
}
-
+
@Override
public void seekExact(long ord) throws IOException {
actualEnum.seekExact(ord);
}
-
+
@Override
public BytesRef term() throws IOException {
return actualEnum.term();
}
/**
- * Thrown to indicate that there was an issue creating a fuzzy query for a given term.
- * Typically occurs with terms longer than 220 UTF-8 characters,
- * but also possible with shorter terms consisting of UTF-32 code points.
+ * Thrown to indicate that there was an issue creating a fuzzy query for a given term. Typically
+ * occurs with terms longer than 220 UTF-8 characters, but also possible with shorter terms
+ * consisting of UTF-32 code points.
*/
public static class FuzzyTermsException extends RuntimeException {
FuzzyTermsException(String term, Throwable cause) {
@@ -328,15 +353,16 @@ public final class FuzzyTermsEnum extends TermsEnum {
/**
* Used for sharing automata between segments
*
- * Levenshtein automata are large and expensive to build; we don't want to build
- * them directly on the query because this can blow up caches that use queries
- * as keys; we also don't want to rebuild them for every segment. This attribute
- * allows the FuzzyTermsEnum to build the automata once for its first segment
- * and then share them for subsequent segment calls.
+ * <p>Levenshtein automata are large and expensive to build; we don't want to build them directly
+ * on the query because this can blow up caches that use queries as keys; we also don't want to
+ * rebuild them for every segment. This attribute allows the FuzzyTermsEnum to build the automata
+ * once for its first segment and then share them for subsequent segment calls.
*/
private interface AutomatonAttribute extends Attribute {
CompiledAutomaton[] getAutomata();
+
int getTermLength();
+
void init(Supplier<FuzzyAutomatonBuilder> builder);
}
@@ -380,5 +406,4 @@ public final class FuzzyTermsEnum extends TermsEnum {
throw new UnsupportedOperationException();
}
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java
index 6c569fe..9998452 100644
--- a/lucene/core/src/java/org/apache/lucene/search/HitQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/search/HitQueue.java
@@ -16,70 +16,67 @@
*/
package org.apache.lucene.search;
-
import org.apache.lucene.util.PriorityQueue;
-/** Expert: Priority queue containing hit docs
+/**
+ * Expert: Priority queue containing hit docs
+ *
* @lucene.internal
*/
public final class HitQueue extends PriorityQueue<ScoreDoc> {
/**
- * Creates a new instance with <code>size</code> elements. If
- * <code>prePopulate</code> is set to true, the queue will pre-populate itself
- * with sentinel objects and set its {@link #size()} to <code>size</code>. In
- * that case, you should not rely on {@link #size()} to get the number of
+ * Creates a new instance with <code>size</code> elements. If <code>prePopulate</code> is set to
+ * true, the queue will pre-populate itself with sentinel objects and set its {@link #size()} to
+ * <code>size</code>. In that case, you should not rely on {@link #size()} to get the number of
* actual elements that were added to the queue, but keep track yourself.<br>
- * <b>NOTE:</b> in case <code>prePopulate</code> is true, you should pop
- * elements from the queue using the following code example:
- *
+ * <b>NOTE:</b> in case <code>prePopulate</code> is true, you should pop elements from the queue
+ * using the following code example:
+ *
* <pre class="prettyprint">
* PriorityQueue<ScoreDoc> pq = new HitQueue(10, true); // pre-populate.
* ScoreDoc top = pq.top();
- *
+ *
* // Add/Update one element.
* top.score = 1.0f;
* top.doc = 0;
* top = (ScoreDoc) pq.updateTop();
* int totalHits = 1;
- *
+ *
* // Now pop only the elements that were *truly* inserted.
* // First, pop all the sentinel elements (there are pq.size() - totalHits).
* for (int i = pq.size() - totalHits; i > 0; i--) pq.pop();
- *
+ *
* // Now pop the truly added elements.
* ScoreDoc[] results = new ScoreDoc[totalHits];
* for (int i = totalHits - 1; i >= 0; i--) {
* results[i] = (ScoreDoc) pq.pop();
* }
* </pre>
- *
- * <p><b>NOTE</b>: This class pre-allocate a full array of
- * length <code>size</code>.
- *
- * @param size
- * the requested size of this queue.
- * @param prePopulate
- * specifies whether to pre-populate the queue with sentinel values.
+ *
+ * <p><b>NOTE</b>: This class pre-allocate a full array of length <code>size</code>.
+ *
+ * @param size the requested size of this queue.
+ * @param prePopulate specifies whether to pre-populate the queue with sentinel values.
*/
public HitQueue(int size, boolean prePopulate) {
- super(size, () -> {
- if (prePopulate) {
- // Always set the doc Id to MAX_VALUE so that it won't be favored by
- // lessThan. This generally should not happen since if score is not NEG_INF,
- // TopScoreDocCollector will always add the object to the queue.
- return new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
- } else {
- return null;
- }
- });
+ super(
+ size,
+ () -> {
+ if (prePopulate) {
+ // Always set the doc Id to MAX_VALUE so that it won't be favored by
+ // lessThan. This generally should not happen since if score is not NEG_INF,
+ // TopScoreDocCollector will always add the object to the queue.
+ return new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
+ } else {
+ return null;
+ }
+ });
}
@Override
protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) {
- if (hitA.score == hitB.score)
- return hitA.doc > hitB.doc;
- else
- return hitA.score < hitB.score;
+ if (hitA.score == hitB.score) return hitA.doc > hitB.doc;
+ else return hitA.score < hitB.score;
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/HitsThresholdChecker.java b/lucene/core/src/java/org/apache/lucene/search/HitsThresholdChecker.java
index 2f8ffe9..9e42cd7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/HitsThresholdChecker.java
+++ b/lucene/core/src/java/org/apache/lucene/search/HitsThresholdChecker.java
@@ -19,13 +19,9 @@ package org.apache.lucene.search;
import java.util.concurrent.atomic.AtomicLong;
-/**
- * Used for defining custom algorithms to allow searches to early terminate
- */
+/** Used for defining custom algorithms to allow searches to early terminate */
abstract class HitsThresholdChecker {
- /**
- * Implementation of HitsThresholdChecker which allows global hit counting
- */
+ /** Implementation of HitsThresholdChecker which allows global hit counting */
private static class GlobalHitsThresholdChecker extends HitsThresholdChecker {
private final int totalHitsThreshold;
private final AtomicLong globalHitCount;
@@ -33,7 +29,8 @@ abstract class HitsThresholdChecker {
public GlobalHitsThresholdChecker(int totalHitsThreshold) {
if (totalHitsThreshold < 0) {
- throw new IllegalArgumentException("totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
+ throw new IllegalArgumentException(
+ "totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
}
this.totalHitsThreshold = totalHitsThreshold;
@@ -46,7 +43,7 @@ abstract class HitsThresholdChecker {
}
@Override
- public boolean isThresholdReached(){
+ public boolean isThresholdReached() {
return globalHitCount.getAcquire() > totalHitsThreshold;
}
@@ -61,9 +58,7 @@ abstract class HitsThresholdChecker {
}
}
- /**
- * Default implementation of HitsThresholdChecker to be used for single threaded execution
- */
+ /** Default implementation of HitsThresholdChecker to be used for single threaded execution */
private static class LocalHitsThresholdChecker extends HitsThresholdChecker {
private final int totalHitsThreshold;
private int hitCount;
@@ -71,7 +66,8 @@ abstract class HitsThresholdChecker {
public LocalHitsThresholdChecker(int totalHitsThreshold) {
if (totalHitsThreshold < 0) {
- throw new IllegalArgumentException("totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
+ throw new IllegalArgumentException(
+ "totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
}
this.totalHitsThreshold = totalHitsThreshold;
@@ -113,7 +109,10 @@ abstract class HitsThresholdChecker {
}
public abstract void incrementHitCount();
+
public abstract ScoreMode scoreMode();
+
public abstract int getHitsThreshold();
+
public abstract boolean isThresholdReached();
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ImpactsDISI.java b/lucene/core/src/java/org/apache/lucene/search/ImpactsDISI.java
index 0f978ed..843bb4a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ImpactsDISI.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ImpactsDISI.java
@@ -17,16 +17,16 @@
package org.apache.lucene.search;
import java.io.IOException;
-
import org.apache.lucene.index.Impacts;
import org.apache.lucene.index.ImpactsEnum;
import org.apache.lucene.index.ImpactsSource;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
/**
- * {@link DocIdSetIterator} that skips non-competitive docs thanks to the
- * indexed impacts. Call {@link #setMinCompetitiveScore(float)} in order to
- * give this iterator the ability to skip low-scoring documents.
+ * {@link DocIdSetIterator} that skips non-competitive docs thanks to the indexed impacts. Call
+ * {@link #setMinCompetitiveScore(float)} in order to give this iterator the ability to skip
+ * low-scoring documents.
+ *
* @lucene.internal
*/
public final class ImpactsDISI extends DocIdSetIterator {
@@ -41,9 +41,10 @@ public final class ImpactsDISI extends DocIdSetIterator {
/**
* Sole constructor.
- * @param in wrapped iterator
+ *
+ * @param in wrapped iterator
* @param impactsSource source of impacts
- * @param scorer scorer
+ * @param scorer scorer
*/
public ImpactsDISI(DocIdSetIterator in, ImpactsSource impactsSource, SimScorer scorer) {
this.in = in;
@@ -54,6 +55,7 @@ public final class ImpactsDISI extends DocIdSetIterator {
/**
* Set the minimum competitive score.
+ *
* @see Scorer#setMinCompetitiveScore(float)
*/
public void setMinCompetitiveScore(float minCompetitiveScore) {
@@ -68,8 +70,9 @@ public final class ImpactsDISI extends DocIdSetIterator {
}
/**
- * Implement the contract of {@link Scorer#advanceShallow(int)} based on the
- * wrapped {@link ImpactsEnum}.
+ * Implement the contract of {@link Scorer#advanceShallow(int)} based on the wrapped {@link
+ * ImpactsEnum}.
+ *
* @see Scorer#advanceShallow(int)
*/
public int advanceShallow(int target) throws IOException {
@@ -79,8 +82,9 @@ public final class ImpactsDISI extends DocIdSetIterator {
}
/**
- * Implement the contract of {@link Scorer#getMaxScore(int)} based on the
- * wrapped {@link ImpactsEnum} and {@link Scorer}.
+ * Implement the contract of {@link Scorer#getMaxScore(int)} based on the wrapped {@link
+ * ImpactsEnum} and {@link Scorer}.
+ *
* @see Scorer#getMaxScore(int)
*/
public float getMaxScore(int upTo) throws IOException {
@@ -145,5 +149,4 @@ public final class ImpactsDISI extends DocIdSetIterator {
public long cost() {
return in.cost();
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
index a3a3a22..c9338b7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexOrDocValuesQuery.java
@@ -17,20 +17,19 @@
package org.apache.lucene.search;
import java.io.IOException;
-
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
/**
- * A query that uses either an index structure (points or terms) or doc values
- * in order to run a query, depending which one is more efficient. This is
- * typically useful for range queries, whose {@link Weight#scorer} is costly
- * to create since it usually needs to sort large lists of doc ids. For
- * instance, for a field that both indexed {@link LongPoint}s and
- * {@link SortedNumericDocValuesField}s with the same values, an efficient
- * range query could be created by doing:
+ * A query that uses either an index structure (points or terms) or doc values in order to run a
+ * query, depending which one is more efficient. This is typically useful for range queries, whose
+ * {@link Weight#scorer} is costly to create since it usually needs to sort large lists of doc ids.
+ * For instance, for a field that both indexed {@link LongPoint}s and {@link
+ * SortedNumericDocValuesField}s with the same values, an efficient range query could be created by
+ * doing:
+ *
* <pre class="prettyprint">
* String field;
* long minValue, maxValue;
@@ -38,13 +37,15 @@ import org.apache.lucene.index.LeafReaderContext;
* Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, minValue, maxValue);
* Query query = new IndexOrDocValuesQuery(pointQuery, dvQuery);
* </pre>
- * The above query will be efficient as it will use points in the case that they
- * perform better, ie. when we need a good lead iterator that will be almost
- * entirely consumed; and doc values otherwise, ie. in the case that another
- * part of the query is already leading iteration but we still need the ability
- * to verify that some documents match.
- * <p><b>NOTE</b>This query currently only works well with point range/exact
- * queries and their equivalent doc values queries.
+ *
+ * The above query will be efficient as it will use points in the case that they perform better, ie.
+ * when we need a good lead iterator that will be almost entirely consumed; and doc values
+ * otherwise, ie. in the case that another part of the query is already leading iteration but we
+ * still need the ability to verify that some documents match.
+ *
+ * <p><b>NOTE</b>This query currently only works well with point range/exact queries and their
+ * equivalent doc values queries.
+ *
* @lucene.experimental
*/
public final class IndexOrDocValuesQuery extends Query {
@@ -52,25 +53,27 @@ public final class IndexOrDocValuesQuery extends Query {
private final Query indexQuery, dvQuery;
/**
- * Create an {@link IndexOrDocValuesQuery}. Both provided queries must match
- * the same documents and give the same scores.
+ * Create an {@link IndexOrDocValuesQuery}. Both provided queries must match the same documents
+ * and give the same scores.
+ *
* @param indexQuery a query that has a good iterator but whose scorer may be costly to create
- * @param dvQuery a query whose scorer is cheap to create that can quickly check whether a given document matches
+ * @param dvQuery a query whose scorer is cheap to create that can quickly check whether a given
+ * document matches
*/
public IndexOrDocValuesQuery(Query indexQuery, Query dvQuery) {
this.indexQuery = indexQuery;
this.dvQuery = dvQuery;
}
- /** Return the wrapped query that may be costly to initialize but has a good
- * iterator. */
+ /** Return the wrapped query that may be costly to initialize but has a good iterator. */
public Query getIndexQuery() {
return indexQuery;
}
- /** Return the wrapped query that may be slow at identifying all matching
- * documents, but which is cheap to initialize and can efficiently
- * verify that some documents match. */
+ /**
+ * Return the wrapped query that may be slow at identifying all matching documents, but which is
+ * cheap to initialize and can efficiently verify that some documents match.
+ */
public Query getRandomAccessQuery() {
return dvQuery;
}
@@ -115,7 +118,8 @@ public final class IndexOrDocValuesQuery extends Query {
}
@Override
- public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
+ throws IOException {
final Weight indexWeight = indexQuery.createWeight(searcher, scoreMode, boost);
final Weight dvWeight = dvQuery.createWeight(searcher, scoreMode, boost);
return new Weight(this) {
@@ -182,8 +186,6 @@ public final class IndexOrDocValuesQuery extends Query {
// the index query's cachehelper here
return indexWeight.isCacheable(ctx);
}
-
};
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 1df656d..e44debb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -32,7 +31,6 @@ import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.function.Supplier;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
@@ -50,48 +48,42 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.automaton.ByteRunAutomaton;
-/** Implements search over a single IndexReader.
+/**
+ * Implements search over a single IndexReader.
*
- * <p>Applications usually need only call the inherited
- * {@link #search(Query,int)} method. For
- * performance reasons, if your index is unchanging, you
- * should share a single IndexSearcher instance across
- * multiple searches instead of creating a new one
- * per-search. If your index has changed and you wish to
- * see the changes reflected in searching, you should
- * use {@link DirectoryReader#openIfChanged(DirectoryReader)}
- * to obtain a new reader and
- * then create a new IndexSearcher from that. Also, for
- * low-latency turnaround it's best to use a near-real-time
- * reader ({@link DirectoryReader#open(IndexWriter)}).
- * Once you have a new {@link IndexReader}, it's relatively
- * cheap to create a new IndexSearcher from it.
+ * <p>Applications usually need only call the inherited {@link #search(Query,int)} method. For
+ * performance reasons, if your index is unchanging, you should share a single IndexSearcher
+ * instance across multiple searches instead of creating a new one per-search. If your index has
+ * changed and you wish to see the changes reflected in searching, you should use {@link
+ * DirectoryReader#openIfChanged(DirectoryReader)} to obtain a new reader and then create a new
+ * IndexSearcher from that. Also, for low-latency turnaround it's best to use a near-real-time
+ * reader ({@link DirectoryReader#open(IndexWriter)}). Once you have a new {@link IndexReader}, it's
+ * relatively cheap to create a new IndexSearcher from it.
*
- * <p><b>NOTE</b>: The {@link #search} and {@link #searchAfter} methods are
- * configured to only count top hits accurately up to {@code 1,000} and may
- * return a {@link TotalHits.Relation lower bound} of the hit count if the
- * hit count is greater than or equal to {@code 1,000}. On queries that match
- * lots of documents, counting the number of hits may take much longer than
- * computing the top hits so this trade-off allows to get some minimal
- * information about the hit count without slowing down search too much. The
- * {@link TopDocs#scoreDocs} array is always accurate however. If this behavior
- * doesn't suit your needs, you should create collectors manually with either
- * {@link TopScoreDocCollector#create} or {@link TopFieldCollector#create} and
- * call {@link #search(Query, Collector)}.
+ * <p><b>NOTE</b>: The {@link #search} and {@link #searchAfter} methods are configured to only count
+ * top hits accurately up to {@code 1,000} and may return a {@link TotalHits.Relation lower bound}
+ * of the hit count if the hit count is greater than or equal to {@code 1,000}. On queries that
+ * match lots of documents, counting the number of hits may take much longer than computing the top
+ * hits so this trade-off allows to get some minimal information about the hit count without slowing
+ * down search too much. The {@link TopDocs#scoreDocs} array is always accurate however. If this
+ * behavior doesn't suit your needs, you should create collectors manually with either {@link
+ * TopScoreDocCollector#create} or {@link TopFieldCollector#create} and call {@link #search(Query,
+ * Collector)}.
*
- * <a id="thread-safety"></a><p><b>NOTE</b>: <code>{@link
- * IndexSearcher}</code> instances are completely
- * thread safe, meaning multiple threads can call any of its
- * methods, concurrently. If your application requires
- * external synchronization, you should <b>not</b>
- * synchronize on the <code>IndexSearcher</code> instance;
- * use your own (non-Lucene) objects instead.</p>
+ * <p><a id="thread-safety"></a>
+ *
+ * <p><b>NOTE</b>: <code>{@link
+ * IndexSearcher}</code> instances are completely thread safe, meaning multiple threads can call any
+ * of its methods, concurrently. If your application requires external synchronization, you should
+ * <b>not</b> synchronize on the <code>IndexSearcher</code> instance; use your own (non-Lucene)
+ * objects instead.
*/
public class IndexSearcher {
static int maxClauseCount = 1024;
private static QueryCache DEFAULT_QUERY_CACHE;
private static QueryCachingPolicy DEFAULT_CACHING_POLICY = new UsageTrackingQueryCachingPolicy();
+
static {
final int maxCachedQueries = 1000;
// min of 32MB or 5% of the heap size
@@ -99,20 +91,21 @@ public class IndexSearcher {
DEFAULT_QUERY_CACHE = new LRUQueryCache(maxCachedQueries, maxRamBytesUsed);
}
/**
- * By default we count hits accurately up to 1000. This makes sure that we
- * don't spend most time on computing hit counts
+ * By default we count hits accurately up to 1000. This makes sure that we don't spend most of
+ * the time on computing hit counts
*/
private static final int TOTAL_HITS_THRESHOLD = 1000;
/**
- * Thresholds for index slice allocation logic. To change the default, extend
- * <code> IndexSearcher</code> and use custom values
+ * Thresholds for index slice allocation logic. To change the default, extend <code> IndexSearcher
+ * </code> and use custom values
*/
private static final int MAX_DOCS_PER_SLICE = 250_000;
+
private static final int MAX_SEGMENTS_PER_SLICE = 5;
final IndexReader reader; // package private for testing!
-
+
// NOTE: these members might change in incompatible ways
// in the next release
protected final IndexReaderContext readerContext;
@@ -134,10 +127,10 @@ public class IndexSearcher {
private QueryCachingPolicy queryCachingPolicy = DEFAULT_CACHING_POLICY;
/**
- * Expert: returns a default Similarity instance.
- * In general, this method is only called to initialize searchers and writers.
- * User code and query implementations should respect
- * {@link IndexSearcher#getSimilarity()}.
+ * Expert: returns a default Similarity instance. In general, this method is only called to
+ * initialize searchers and writers. User code and query implementations should respect {@link
+ * IndexSearcher#getSimilarity()}.
+ *
* @lucene.internal
*/
public static Similarity getDefaultSimilarity() {
@@ -146,6 +139,7 @@ public class IndexSearcher {
/**
* Expert: Get the default {@link QueryCache} or {@code null} if the cache is disabled.
+ *
* @lucene.internal
*/
public static QueryCache getDefaultQueryCache() {
@@ -154,6 +148,7 @@ public class IndexSearcher {
/**
* Expert: set the default {@link QueryCache} instance.
+ *
* @lucene.internal
*/
public static void setDefaultQueryCache(QueryCache defaultQueryCache) {
@@ -162,6 +157,7 @@ public class IndexSearcher {
/**
* Expert: Get the default {@link QueryCachingPolicy}.
+ *
* @lucene.internal
*/
public static QueryCachingPolicy getDefaultQueryCachingPolicy() {
@@ -170,6 +166,7 @@ public class IndexSearcher {
/**
* Expert: set the default {@link QueryCachingPolicy} instance.
+ *
* @lucene.internal
*/
public static void setDefaultQueryCachingPolicy(QueryCachingPolicy defaultQueryCachingPolicy) {
@@ -184,29 +181,27 @@ public class IndexSearcher {
this(r, null);
}
- /** Runs searches for each segment separately, using the
- * provided Executor. NOTE:
- * if you are using {@link NIOFSDirectory}, do not use
- * the shutdownNow method of ExecutorService as this uses
- * Thread.interrupt under-the-hood which can silently
- * close file descriptors (see <a
- * href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
- *
- * @lucene.experimental */
+ /**
+ * Runs searches for each segment separately, using the provided Executor. NOTE: if you are using
+ * {@link NIOFSDirectory}, do not use the shutdownNow method of ExecutorService as this uses
+ * Thread.interrupt under-the-hood which can silently close file descriptors (see <a
+ * href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
+ *
+ * @lucene.experimental
+ */
public IndexSearcher(IndexReader r, Executor executor) {
this(r.getContext(), executor);
}
/**
* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
- * <p>
- * Given a non-<code>null</code> {@link Executor} this method runs
- * searches for each segment separately, using the provided Executor.
- * NOTE: if you are using {@link NIOFSDirectory}, do not use the shutdownNow method of
- * ExecutorService as this uses Thread.interrupt under-the-hood which can
- * silently close file descriptors (see <a
+ *
+ * <p>Given a non-<code>null</code> {@link Executor} this method runs searches for each segment
+ * separately, using the provided Executor. NOTE: if you are using {@link NIOFSDirectory}, do not
+ * use the shutdownNow method of ExecutorService as this uses Thread.interrupt under-the-hood
+ * which can silently close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
- *
+ *
* @see IndexReaderContext
* @see IndexReader#getContext()
* @lucene.experimental
@@ -217,8 +212,9 @@ public class IndexSearcher {
// Package private for testing
IndexSearcher(IndexReaderContext context, Executor executor, SliceExecutor sliceExecutor) {
- assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
- assert (sliceExecutor == null) == (executor==null);
+ assert context.isTopLevel
+ : "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
+ assert (sliceExecutor == null) == (executor == null);
reader = context.reader();
this.executor = executor;
@@ -239,18 +235,18 @@ public class IndexSearcher {
this(context, null);
}
- /** Return the maximum number of clauses permitted, 1024 by default.
- * Attempts to add more than the permitted number of clauses cause {@link
- * TooManyClauses} to be thrown.
+ /**
+ * Return the maximum number of clauses permitted, 1024 by default. Attempts to add more than the
+ * permitted number of clauses cause {@link TooManyClauses} to be thrown.
+ *
* @see #setMaxClauseCount(int)
*/
- public static int getMaxClauseCount() { return maxClauseCount; }
+ public static int getMaxClauseCount() {
+ return maxClauseCount;
+ }
- /**
- * Set the maximum number of clauses permitted per Query.
- * Default value is 1024.
- */
- public static void setMaxClauseCount(int value) {
+ /** Set the maximum number of clauses permitted per Query. Default value is 1024. */
+ public static void setMaxClauseCount(int value) {
if (value < 1) {
throw new IllegalArgumentException("maxClauseCount must be >= 1");
}
@@ -258,12 +254,13 @@ public class IndexSearcher {
}
/**
- * Set the {@link QueryCache} to use when scores are not needed.
- * A value of {@code null} indicates that query matches should never be
- * cached. This method should be called <b>before</b> starting using this
- * {@link IndexSearcher}.
- * <p>NOTE: When using a query cache, queries should not be modified after
- * they have been passed to IndexSearcher.
+ * Set the {@link QueryCache} to use when scores are not needed. A value of {@code null} indicates
+ * that query matches should never be cached. This method should be called <b>before</b> starting
+ * using this {@link IndexSearcher}.
+ *
+ * <p>NOTE: When using a query cache, queries should not be modified after they have been passed
+ * to IndexSearcher.
+ *
* @see QueryCache
* @lucene.experimental
*/
@@ -272,10 +269,11 @@ public class IndexSearcher {
}
/**
- * Return the query cache of this {@link IndexSearcher}. This will be either
- * the {@link #getDefaultQueryCache() default query cache} or the query cache
- * that was last set through {@link #setQueryCache(QueryCache)}. A return
- * value of {@code null} indicates that caching is disabled.
+ * Return the query cache of this {@link IndexSearcher}. This will be either the {@link
+ * #getDefaultQueryCache() default query cache} or the query cache that was last set through
+ * {@link #setQueryCache(QueryCache)}. A return value of {@code null} indicates that caching is
+ * disabled.
+ *
* @lucene.experimental
*/
public QueryCache getQueryCache() {
@@ -283,9 +281,9 @@ public class IndexSearcher {
}
/**
- * Set the {@link QueryCachingPolicy} to use for query caching.
- * This method should be called <b>before</b> starting using this
- * {@link IndexSearcher}.
+ * Set the {@link QueryCachingPolicy} to use for query caching. This method should be called
+ * <b>before</b> starting using this {@link IndexSearcher}.
+ *
* @see QueryCachingPolicy
* @lucene.experimental
*/
@@ -294,9 +292,10 @@ public class IndexSearcher {
}
/**
- * Return the query cache of this {@link IndexSearcher}. This will be either
- * the {@link #getDefaultQueryCachingPolicy() default policy} or the policy
- * that was last set through {@link #setQueryCachingPolicy(QueryCachingPolicy)}.
+ * Return the query cache of this {@link IndexSearcher}. This will be either the {@link
+ * #getDefaultQueryCachingPolicy() default policy} or the policy that was last set through {@link
+ * #setQueryCachingPolicy(QueryCachingPolicy)}.
+ *
* @lucene.experimental
*/
public QueryCachingPolicy getQueryCachingPolicy() {
@@ -304,25 +303,23 @@ public class IndexSearcher {
}
/**
- * Expert: Creates an array of leaf slices each holding a subset of the given leaves.
- * Each {@link LeafSlice} is executed in a single thread. By default, segments with more than
+ * Expert: Creates an array of leaf slices each holding a subset of the given leaves. Each {@link
+ * LeafSlice} is executed in a single thread. By default, segments with more than
* MAX_DOCS_PER_SLICE will get their own thread
*/
protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
return slices(leaves, MAX_DOCS_PER_SLICE, MAX_SEGMENTS_PER_SLICE);
}
- /**
- * Static method to segregate LeafReaderContexts amongst multiple slices
- */
- public static LeafSlice[] slices (List<LeafReaderContext> leaves, int maxDocsPerSlice,
- int maxSegmentsPerSlice) {
+ /** Static method to segregate LeafReaderContexts amongst multiple slices */
+ public static LeafSlice[] slices(
+ List<LeafReaderContext> leaves, int maxDocsPerSlice, int maxSegmentsPerSlice) {
// Make a copy so we can sort:
List<LeafReaderContext> sortedLeaves = new ArrayList<>(leaves);
// Sort by maxDoc, descending:
- Collections.sort(sortedLeaves,
- Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc())));
+ Collections.sort(
+ sortedLeaves, Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc())));
final List<List<LeafReaderContext>> groupedLeaves = new ArrayList<>();
long docSum = 0;
@@ -364,47 +361,48 @@ public class IndexSearcher {
return reader;
}
- /**
- * Sugar for <code>.getIndexReader().document(docID)</code>
- * @see IndexReader#document(int)
+ /**
+ * Sugar for <code>.getIndexReader().document(docID)</code>
+ *
+ * @see IndexReader#document(int)
*/
public Document doc(int docID) throws IOException {
return reader.document(docID);
}
- /**
+ /**
* Sugar for <code>.getIndexReader().document(docID, fieldVisitor)</code>
- * @see IndexReader#document(int, StoredFieldVisitor)
+ *
+ * @see IndexReader#document(int, StoredFieldVisitor)
*/
public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
reader.document(docID, fieldVisitor);
}
- /**
+ /**
* Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code>
- * @see IndexReader#document(int, Set)
+ *
+ * @see IndexReader#document(int, Set)
*/
public Document doc(int docID, Set<String> fieldsToLoad) throws IOException {
return reader.document(docID, fieldsToLoad);
}
- /** Expert: Set the Similarity implementation used by this IndexSearcher.
- *
- */
+ /** Expert: Set the Similarity implementation used by this IndexSearcher. */
public void setSimilarity(Similarity similarity) {
this.similarity = similarity;
}
- /** Expert: Get the {@link Similarity} to use to compute scores. This returns the
- * {@link Similarity} that has been set through {@link #setSimilarity(Similarity)}
- * or the default {@link Similarity} if none has been set explicitly. */
+ /**
+ * Expert: Get the {@link Similarity} to use to compute scores. This returns the {@link
+ * Similarity} that has been set through {@link #setSimilarity(Similarity)} or the default {@link
+ * Similarity} if none has been set explicitly.
+ */
public Similarity getSimilarity() {
return similarity;
}
- /**
- * Count how many documents match the given query.
- */
+ /** Count how many documents match the given query. */
public int count(Query query) throws IOException {
query = rewrite(query);
while (true) {
@@ -429,169 +427,171 @@ public class IndexSearcher {
}
// general case: create a collector and count matches
- final CollectorManager<TotalHitCountCollector, Integer> collectorManager = new CollectorManager<TotalHitCountCollector, Integer>() {
-
- @Override
- public TotalHitCountCollector newCollector() throws IOException {
- return new TotalHitCountCollector();
- }
-
- @Override
- public Integer reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
- int total = 0;
- for (TotalHitCountCollector collector : collectors) {
- total += collector.getTotalHits();
- }
- return total;
- }
-
- };
+ final CollectorManager<TotalHitCountCollector, Integer> collectorManager =
+ new CollectorManager<TotalHitCountCollector, Integer>() {
+
+ @Override
+ public TotalHitCountCollector newCollector() throws IOException {
+ return new TotalHitCountCollector();
+ }
+
+ @Override
+ public Integer reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
+ int total = 0;
+ for (TotalHitCountCollector collector : collectors) {
+ total += collector.getTotalHits();
+ }
+ return total;
+ }
+ };
return search(query, collectorManager);
}
- /** Returns the leaf slices used for concurrent searching, or null if no {@code Executor} was
- * passed to the constructor.
+ /**
+ * Returns the leaf slices used for concurrent searching, or null if no {@code Executor} was
+ * passed to the constructor.
*
- * @lucene.experimental */
+ * @lucene.experimental
+ */
public LeafSlice[] getSlices() {
- return leafSlices;
+ return leafSlices;
}
-
- /** Finds the top <code>n</code>
- * hits for <code>query</code> where all results are after a previous
+
+ /**
+ * Finds the top <code>n</code> hits for <code>query</code> where all results are after a previous
* result (<code>after</code>).
- * <p>
- * By passing the bottom result from a previous page as <code>after</code>,
- * this method can be used for efficient 'deep-paging' across potentially
- * large result sets.
*
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ * <p>By passing the bottom result from a previous page as <code>after</code>, this method can be
+ * used for efficient 'deep-paging' across potentially large result sets.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
public TopDocs searchAfter(ScoreDoc after, Query query, int numHits) throws IOException {
final int limit = Math.max(1, reader.maxDoc());
if (after != null && after.doc >= limit) {
- throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
- + after.doc + " limit=" + limit);
+ throw new IllegalArgumentException(
+ "after.doc exceeds the number of documents in the reader: after.doc="
+ + after.doc
+ + " limit="
+ + limit);
}
final int cappedNumHits = Math.min(numHits, limit);
- final CollectorManager<TopScoreDocCollector, TopDocs> manager = new CollectorManager<TopScoreDocCollector, TopDocs>() {
-
- private final HitsThresholdChecker hitsThresholdChecker = (executor == null || leafSlices.length <= 1) ? HitsThresholdChecker.create(Math.max(TOTAL_HITS_THRESHOLD, numHits)) :
- HitsThresholdChecker.createShared(Math.max(TOTAL_HITS_THRESHOLD, numHits));
-
- private final MaxScoreAccumulator minScoreAcc = (executor == null || leafSlices.length <= 1) ? null : new MaxScoreAccumulator();
-
- @Override
- public TopScoreDocCollector newCollector() throws IOException {
- return TopScoreDocCollector.create(cappedNumHits, after, hitsThresholdChecker, minScoreAcc);
- }
-
- @Override
- public TopDocs reduce(Collection<TopScoreDocCollector> collectors) throws IOException {
- final TopDocs[] topDocs = new TopDocs[collectors.size()];
- int i = 0;
- for (TopScoreDocCollector collector : collectors) {
- topDocs[i++] = collector.topDocs();
- }
- return TopDocs.merge(0, cappedNumHits, topDocs);
- }
-
- };
+ final CollectorManager<TopScoreDocCollector, TopDocs> manager =
+ new CollectorManager<TopScoreDocCollector, TopDocs>() {
+
+ private final HitsThresholdChecker hitsThresholdChecker =
+ (executor == null || leafSlices.length <= 1)
+ ? HitsThresholdChecker.create(Math.max(TOTAL_HITS_THRESHOLD, numHits))
+ : HitsThresholdChecker.createShared(Math.max(TOTAL_HITS_THRESHOLD, numHits));
+
+ private final MaxScoreAccumulator minScoreAcc =
+ (executor == null || leafSlices.length <= 1) ? null : new MaxScoreAccumulator();
+
+ @Override
+ public TopScoreDocCollector newCollector() throws IOException {
+ return TopScoreDocCollector.create(
+ cappedNumHits, after, hitsThresholdChecker, minScoreAcc);
+ }
+
+ @Override
+ public TopDocs reduce(Collection<TopScoreDocCollector> collectors) throws IOException {
+ final TopDocs[] topDocs = new TopDocs[collectors.size()];
+ int i = 0;
+ for (TopScoreDocCollector collector : collectors) {
+ topDocs[i++] = collector.topDocs();
+ }
+ return TopDocs.merge(0, cappedNumHits, topDocs);
+ }
+ };
return search(query, manager);
}
- /** Finds the top <code>n</code>
- * hits for <code>query</code>.
+ /**
+ * Finds the top <code>n</code> hits for <code>query</code>.
*
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
- public TopDocs search(Query query, int n)
- throws IOException {
+ public TopDocs search(Query query, int n) throws IOException {
return searchAfter(null, query, n);
}
- /** Lower-level search API.
+ /**
+ * Lower-level search API.
*
* <p>{@link LeafCollector#collect(int)} is called for every matching document.
*
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
- public void search(Query query, Collector results)
- throws IOException {
+ public void search(Query query, Collector results) throws IOException {
query = rewrite(query);
search(leafContexts, createWeight(query, results.scoreMode(), 1), results);
}
- /** Search implementation with arbitrary sorting, plus
- * control over whether hit scores and max score
- * should be computed. Finds
- * the top <code>n</code> hits for <code>query</code>, and sorting
- * the hits by the criteria in <code>sort</code>.
- * If <code>doDocScores</code> is <code>true</code>
- * then the score of each hit will be computed and
- * returned. If <code>doMaxScore</code> is
- * <code>true</code> then the maximum score over all
- * collected hits will be computed.
- *
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
- */
- public TopFieldDocs search(Query query, int n,
- Sort sort, boolean doDocScores) throws IOException {
+ /**
+ * Search implementation with arbitrary sorting, plus control over whether hit scores and max
+ * score should be computed. Finds the top <code>n</code> hits for <code>query</code>, and sorting
+ * the hits by the criteria in <code>sort</code>. If <code>doDocScores</code> is <code>true</code>
+ * then the score of each hit will be computed and returned. If <code>doMaxScore</code> is <code>
+ * true</code> then the maximum score over all collected hits will be computed.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
+ */
+ public TopFieldDocs search(Query query, int n, Sort sort, boolean doDocScores)
+ throws IOException {
return searchAfter(null, query, n, sort, doDocScores);
}
/**
* Search implementation with arbitrary sorting.
+ *
* @param query The query to search for
* @param n Return only the top n results
* @param sort The {@link org.apache.lucene.search.Sort} object
- * @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
+ * @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort}
+ * instance
* @throws IOException if there is a low-level I/O error
*/
public TopFieldDocs search(Query query, int n, Sort sort) throws IOException {
return searchAfter(null, query, n, sort, false);
}
- /** Finds the top <code>n</code>
- * hits for <code>query</code> where all results are after a previous
+ /**
+ * Finds the top <code>n</code> hits for <code>query</code> where all results are after a previous
* result (<code>after</code>).
- * <p>
- * By passing the bottom result from a previous page as <code>after</code>,
- * this method can be used for efficient 'deep-paging' across potentially
- * large result sets.
*
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ * <p>By passing the bottom result from a previous page as <code>after</code>, this method can be
+ * used for efficient 'deep-paging' across potentially large result sets.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
public TopDocs searchAfter(ScoreDoc after, Query query, int n, Sort sort) throws IOException {
return searchAfter(after, query, n, sort, false);
}
- /** Finds the top <code>n</code>
- * hits for <code>query</code> where all results are after a previous
- * result (<code>after</code>), allowing control over
- * whether hit scores and max score should be computed.
- * <p>
- * By passing the bottom result from a previous page as <code>after</code>,
- * this method can be used for efficient 'deep-paging' across potentially
- * large result sets. If <code>doDocScores</code> is <code>true</code>
- * then the score of each hit will be computed and
- * returned. If <code>doMaxScore</code> is
- * <code>true</code> then the maximum score over all
- * collected hits will be computed.
- *
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
- */
- public TopFieldDocs searchAfter(ScoreDoc after, Query query, int numHits, Sort sort,
- boolean doDocScores) throws IOException {
+ /**
+ * Finds the top <code>n</code> hits for <code>query</code> where all results are after a previous
+ * result (<code>after</code>), allowing control over whether hit scores and max score should be
+ * computed.
+ *
+ * <p>By passing the bottom result from a previous page as <code>after</code>, this method can be
+ * used for efficient 'deep-paging' across potentially large result sets. If <code>doDocScores
+ * </code> is <code>true</code> then the score of each hit will be computed and returned. If
+ * <code>doMaxScore</code> is <code>true</code> then the maximum score over all collected hits
+ * will be computed.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
+ */
+ public TopFieldDocs searchAfter(
+ ScoreDoc after, Query query, int numHits, Sort sort, boolean doDocScores) throws IOException {
if (after != null && !(after instanceof FieldDoc)) {
// TODO: if we fix type safety of TopFieldDocs we can
// remove this
@@ -600,40 +600,47 @@ public class IndexSearcher {
return searchAfter((FieldDoc) after, query, numHits, sort, doDocScores);
}
- private TopFieldDocs searchAfter(FieldDoc after, Query query, int numHits, Sort sort,
- boolean doDocScores) throws IOException {
+ private TopFieldDocs searchAfter(
+ FieldDoc after, Query query, int numHits, Sort sort, boolean doDocScores) throws IOException {
final int limit = Math.max(1, reader.maxDoc());
if (after != null && after.doc >= limit) {
- throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
- + after.doc + " limit=" + limit);
+ throw new IllegalArgumentException(
+ "after.doc exceeds the number of documents in the reader: after.doc="
+ + after.doc
+ + " limit="
+ + limit);
}
final int cappedNumHits = Math.min(numHits, limit);
final Sort rewrittenSort = sort.rewrite(this);
- final CollectorManager<TopFieldCollector, TopFieldDocs> manager = new CollectorManager<>() {
-
- private final HitsThresholdChecker hitsThresholdChecker = (executor == null || leafSlices.length <= 1) ? HitsThresholdChecker.create(Math.max(TOTAL_HITS_THRESHOLD, numHits)) :
- HitsThresholdChecker.createShared(Math.max(TOTAL_HITS_THRESHOLD, numHits));
-
- private final MaxScoreAccumulator minScoreAcc = (executor == null || leafSlices.length <= 1) ? null : new MaxScoreAccumulator();
-
- @Override
- public TopFieldCollector newCollector() throws IOException {
- // TODO: don't pay the price for accurate hit counts by default
- return TopFieldCollector.create(rewrittenSort, cappedNumHits, after, hitsThresholdChecker, minScoreAcc);
- }
-
- @Override
- public TopFieldDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
- final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()];
- int i = 0;
- for (TopFieldCollector collector : collectors) {
- topDocs[i++] = collector.topDocs();
- }
- return TopDocs.merge(rewrittenSort, 0, cappedNumHits, topDocs);
- }
-
- };
+ final CollectorManager<TopFieldCollector, TopFieldDocs> manager =
+ new CollectorManager<>() {
+
+ private final HitsThresholdChecker hitsThresholdChecker =
+ (executor == null || leafSlices.length <= 1)
+ ? HitsThresholdChecker.create(Math.max(TOTAL_HITS_THRESHOLD, numHits))
+ : HitsThresholdChecker.createShared(Math.max(TOTAL_HITS_THRESHOLD, numHits));
+
+ private final MaxScoreAccumulator minScoreAcc =
+ (executor == null || leafSlices.length <= 1) ? null : new MaxScoreAccumulator();
+
+ @Override
+ public TopFieldCollector newCollector() throws IOException {
+ // TODO: don't pay the price for accurate hit counts by default
+ return TopFieldCollector.create(
+ rewrittenSort, cappedNumHits, after, hitsThresholdChecker, minScoreAcc);
+ }
+
+ @Override
+ public TopFieldDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
+ final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()];
+ int i = 0;
+ for (TopFieldCollector collector : collectors) {
+ topDocs[i++] = collector.topDocs();
+ }
+ return TopDocs.merge(rewrittenSort, 0, cappedNumHits, topDocs);
+ }
+ };
TopFieldDocs topDocs = search(query, manager);
if (doDocScores) {
@@ -642,16 +649,16 @@ public class IndexSearcher {
return topDocs;
}
- /**
- * Lower-level search API.
- * Search all leaves using the given {@link CollectorManager}. In contrast
- * to {@link #search(Query, Collector)}, this method will use the searcher's
- * {@link Executor} in order to parallelize execution of the collection
- * on the configured {@link #leafSlices}.
- * @see CollectorManager
- * @lucene.experimental
- */
- public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException {
+ /**
+ * Lower-level search API. Search all leaves using the given {@link CollectorManager}. In contrast
+ * to {@link #search(Query, Collector)}, this method will use the searcher's {@link Executor} in
+ * order to parallelize execution of the collection on the configured {@link #leafSlices}.
+ *
+ * @see CollectorManager
+ * @lucene.experimental
+ */
+ public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager)
+ throws IOException {
if (executor == null || leafSlices.length <= 1) {
final C collector = collectorManager.newCollector();
search(query, collector);
@@ -665,7 +672,8 @@ public class IndexSearcher {
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
- throw new IllegalStateException("CollectorManager does not always produce collectors with the same score mode");
+ throw new IllegalStateException(
+ "CollectorManager does not always produce collectors with the same score mode");
}
}
if (scoreMode == null) {
@@ -678,10 +686,12 @@ public class IndexSearcher {
for (int i = 0; i < leafSlices.length; ++i) {
final LeafReaderContext[] leaves = leafSlices[i].leaves;
final C collector = collectors.get(i);
- FutureTask<C> task = new FutureTask<>(() -> {
- search(Arrays.asList(leaves), weight, collector);
- return collector;
- });
+ FutureTask<C> task =
+ new FutureTask<>(
+ () -> {
+ search(Arrays.asList(leaves), weight, collector);
+ return collector;
+ });
listTasks.add(task);
}
@@ -703,22 +713,17 @@ public class IndexSearcher {
/**
* Lower-level search API.
- *
- * <p>
- * {@link LeafCollector#collect(int)} is called for every document. <br>
- *
- * <p>
- * NOTE: this method executes the searches on all given leaves exclusively.
- * To search across all the searchers leaves use {@link #leafContexts}.
- *
- * @param leaves
- * the searchers leaves to execute the searches on
- * @param weight
- * to match documents
- * @param collector
- * to receive hits
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ *
+ * <p>{@link LeafCollector#collect(int)} is called for every document. <br>
+ *
+ * <p>NOTE: this method executes the searches on all given leaves exclusively. To search across
+ * all the searchers leaves use {@link #leafContexts}.
+ *
+ * @param leaves the searchers leaves to execute the searches on
+ * @param weight to match documents
+ * @param collector to receive hits
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
throws IOException {
@@ -747,24 +752,27 @@ public class IndexSearcher {
}
}
- /** Expert: called to re-write queries into primitive queries.
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ /**
+ * Expert: called to re-write queries into primitive queries.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
public Query rewrite(Query original) throws IOException {
Query query = original;
- for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
- rewrittenQuery = query.rewrite(reader)) {
+ for (Query rewrittenQuery = query.rewrite(reader);
+ rewrittenQuery != query;
+ rewrittenQuery = query.rewrite(reader)) {
query = rewrittenQuery;
}
query.visit(getNumClausesCheckVisitor());
return query;
}
- /** Returns a QueryVisitor which recursively checks the total
- * number of clauses that a query and its children cumulatively
- * have and validates that the total number does not exceed
- * the specified limit
+ /**
+ * Returns a QueryVisitor which recursively checks the total number of clauses that a query and
+ * its children cumulatively have and validates that the total number does not exceed the
+ * specified limit
*/
private static QueryVisitor getNumClausesCheckVisitor() {
return new QueryVisitor() {
@@ -794,7 +802,8 @@ public class IndexSearcher {
}
@Override
- public void consumeTermsMatching(Query query, String field, Supplier<ByteRunAutomaton> automaton) {
+ public void consumeTermsMatching(
+ Query query, String field, Supplier<ByteRunAutomaton> automaton) {
if (numClauses > maxClauseCount) {
throw new TooManyClauses();
}
@@ -803,30 +812,30 @@ public class IndexSearcher {
};
}
- /** Returns an Explanation that describes how <code>doc</code> scored against
- * <code>query</code>.
+ /**
+ * Returns an Explanation that describes how <code>doc</code> scored against <code>query</code>.
*
- * <p>This is intended to be used in developing Similarity implementations,
- * and, for good performance, should not be displayed with every hit.
- * Computing an explanation is as expensive as executing the query over the
- * entire index.
+ * <p>This is intended to be used in developing Similarity implementations, and, for good
+ * performance, should not be displayed with every hit. Computing an explanation is as expensive
+ * as executing the query over the entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
query = rewrite(query);
return explain(createWeight(query, ScoreMode.COMPLETE, 1), doc);
}
- /** Expert: low-level implementation method
- * Returns an Explanation that describes how <code>doc</code> scored against
- * <code>weight</code>.
+ /**
+ * Expert: low-level implementation method. Returns an Explanation that describes how <code>doc
+ * </code> scored against <code>weight</code>.
+ *
+ * <p>This is intended to be used in developing Similarity implementations, and, for good
+ * performance, should not be displayed with every hit. Computing an explanation is as expensive
+ * as executing the query over the entire index.
*
- * <p>This is intended to be used in developing Similarity implementations,
- * and, for good performance, should not be displayed with every hit.
- * Computing an explanation is as expensive as executing the query over the
- * entire index.
* <p>Applications should call {@link IndexSearcher#explain(Query, int)}.
- * @throws TooManyClauses If a query would exceed
- * {@link IndexSearcher#getMaxClauseCount()} clauses.
+ *
+ * @throws TooManyClauses If a query would exceed {@link IndexSearcher#getMaxClauseCount()}
+ * clauses.
*/
protected Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, leafContexts);
@@ -840,8 +849,9 @@ public class IndexSearcher {
}
/**
- * Creates a {@link Weight} for the given query, potentially adding caching
- * if possible and configured.
+ * Creates a {@link Weight} for the given query, potentially adding caching if possible and
+ * configured.
+ *
* @lucene.experimental
*/
public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws IOException {
@@ -855,6 +865,7 @@ public class IndexSearcher {
/**
* Returns this searchers the top-level {@link IndexReaderContext}.
+ *
* @see IndexReader#getContext()
*/
/* sugar for #getReader().getTopReaderContext() */
@@ -863,18 +874,20 @@ public class IndexSearcher {
}
/**
- * A class holding a subset of the {@link IndexSearcher}s leaf contexts to be
- * executed within a single thread.
- *
+ * A class holding a subset of the {@link IndexSearcher}s leaf contexts to be executed within a
+ * single thread.
+ *
* @lucene.experimental
*/
public static class LeafSlice {
- /** The leaves that make up this slice.
+ /**
+ * The leaves that make up this slice.
*
- * @lucene.experimental */
+ * @lucene.experimental
+ */
public final LeafReaderContext[] leaves;
-
+
public LeafSlice(List<LeafReaderContext> leavesList) {
Collections.sort(leavesList, Comparator.comparingInt(l -> l.docBase));
this.leaves = leavesList.toArray(new LeafReaderContext[0]);
@@ -883,32 +896,39 @@ public class IndexSearcher {
@Override
public String toString() {
- return "IndexSearcher(" + reader + "; executor=" + executor + "; sliceExecutionControlPlane " + sliceExecutor + ")";
+ return "IndexSearcher("
+ + reader
+ + "; executor="
+ + executor
+ + "; sliceExecutionControlPlane "
+ + sliceExecutor
+ + ")";
}
-
+
/**
* Returns {@link TermStatistics} for a term.
- *
- * This can be overridden for example, to return a term's statistics
- * across a distributed collection.
+ *
+ * <p>This can be overridden for example, to return a term's statistics across a distributed
+ * collection.
*
* @param docFreq The document frequency of the term. It must be greater or equal to 1.
* @param totalTermFreq The total term frequency.
* @return A {@link TermStatistics} (never null).
- *
* @lucene.experimental
*/
- public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
+ public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq)
+ throws IOException {
// This constructor will throw an exception if docFreq <= 0.
return new TermStatistics(term.bytes(), docFreq, totalTermFreq);
}
-
+
/**
- * Returns {@link CollectionStatistics} for a field, or {@code null} if
- * the field does not exist (has no indexed terms)
- *
- * This can be overridden for example, to return a field's statistics
- * across a distributed collection.
+ * Returns {@link CollectionStatistics} for a field, or {@code null} if the field does not exist
+ * (has no indexed terms)
+ *
+ * <p>This can be overridden for example, to return a field's statistics across a distributed
+ * collection.
+ *
* @lucene.experimental
*/
public CollectionStatistics collectionStatistics(String field) throws IOException {
@@ -931,17 +951,15 @@ public class IndexSearcher {
return new CollectionStatistics(field, reader.maxDoc(), docCount, sumTotalTermFreq, sumDocFreq);
}
- /**
- * Returns this searchers executor or <code>null</code> if no executor was provided
- */
+ /** Returns this searcher's executor or <code>null</code> if no executor was provided */
public Executor getExecutor() {
return executor;
}
- /** Thrown when an attempt is made to add more than {@link
- * #getMaxClauseCount()} clauses. This typically happens if
- * a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery
- * is expanded to many terms during search.
+ /**
+ * Thrown when an attempt is made to add more than {@link #getMaxClauseCount()} clauses. This
+ * typically happens if a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery is expanded to
+ * many terms during search.
*/
public static class TooManyClauses extends RuntimeException {
public TooManyClauses() {
@@ -949,9 +967,7 @@ public class IndexSearcher {
}
}
- /**
- * Return the SliceExecutionControlPlane instance to be used for this IndexSearcher instance
- */
+ /** Return the SliceExecutionControlPlane instance to be used for this IndexSearcher instance */
private static SliceExecutor getSliceExecutionControlPlane(Executor executor) {
if (executor == null) {
return null;
diff --git a/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java b/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java
index 2656071..1592836 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LRUQueryCache.java
@@ -16,6 +16,9 @@
*/
package org.apache.lucene.search;
+import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
+import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
+import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
import java.io.IOException;
import java.util.ArrayList;
@@ -31,7 +34,6 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Predicate;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
@@ -43,24 +45,19 @@ import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.RoaringDocIdSet;
-import static org.apache.lucene.util.RamUsageEstimator.HASHTABLE_RAM_BYTES_PER_ENTRY;
-import static org.apache.lucene.util.RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
-import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED;
-
/**
- * A {@link QueryCache} that evicts queries using a LRU (least-recently-used)
- * eviction policy in order to remain under a given maximum size and number of
- * bytes used.
+ * A {@link QueryCache} that evicts queries using a LRU (least-recently-used) eviction policy in
+ * order to remain under a given maximum size and number of bytes used.
*
- * This class is thread-safe.
+ * <p>This class is thread-safe.
*
- * Note that query eviction runs in linear time with the total number of
- * segments that have cache entries so this cache works best with
- * {@link QueryCachingPolicy caching policies} that only cache on "large"
- * segments, and it is advised to not share this cache across too many indices.
+ * <p>Note that query eviction runs in linear time with the total number of segments that have cache
+ * entries so this cache works best with {@link QueryCachingPolicy caching policies} that only cache
+ * on "large" segments, and it is advised to not share this cache across too many indices.
+ *
+ * <p>A default query cache and policy instance is used in IndexSearcher. If you want to replace
+ * those defaults it is typically done like this:
*
- * A default query cache and policy instance is used in IndexSearcher. If you want to replace those defaults
- * it is typically done like this:
* <pre class="prettyprint">
* final int maxNumberOfCachedQueries = 256;
* final long maxRamBytesUsed = 50 * 1024L * 1024L; // 50MB
@@ -72,17 +69,15 @@ import static org.apache.lucene.util.RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_U
* indexSearcher.setQueryCachingPolicy(defaultCachingPolicy);
* </pre>
*
- * This cache exposes some global statistics ({@link #getHitCount() hit count},
- * {@link #getMissCount() miss count}, {@link #getCacheSize() number of cache
- * entries}, {@link #getCacheCount() total number of DocIdSets that have ever
- * been cached}, {@link #getEvictionCount() number of evicted entries}). In
- * case you would like to have more fine-grained statistics, such as per-index
- * or per-query-class statistics, it is possible to override various callbacks:
- * {@link #onHit}, {@link #onMiss},
- * {@link #onQueryCache}, {@link #onQueryEviction},
- * {@link #onDocIdSetCache}, {@link #onDocIdSetEviction} and {@link #onClear}.
- * It is better to not perform heavy computations in these methods though since
- * they are called synchronously and under a lock.
+ * This cache exposes some global statistics ({@link #getHitCount() hit count}, {@link
+ * #getMissCount() miss count}, {@link #getCacheSize() number of cache entries}, {@link
+ * #getCacheCount() total number of DocIdSets that have ever been cached}, {@link
+ * #getEvictionCount() number of evicted entries}). In case you would like to have more fine-grained
+ * statistics, such as per-index or per-query-class statistics, it is possible to override various
+ * callbacks: {@link #onHit}, {@link #onMiss}, {@link #onQueryCache}, {@link #onQueryEviction},
+ * {@link #onDocIdSetCache}, {@link #onDocIdSetEviction} and {@link #onClear}. It is better to not
+ * perform heavy computations in these methods though since they are called synchronously and under
+ * a lock.
*
* @see QueryCachingPolicy
* @lucene.experimental
@@ -112,20 +107,24 @@ public class LRUQueryCache implements QueryCache, Accountable {
private volatile long cacheSize;
/**
- * Expert: Create a new instance that will cache at most <code>maxSize</code>
- * queries with at most <code>maxRamBytesUsed</code> bytes of memory, only on
- * leaves that satisfy {@code leavesToCache}.
+ * Expert: Create a new instance that will cache at most <code>maxSize</code> queries with at most
+ * <code>maxRamBytesUsed</code> bytes of memory, only on leaves that satisfy {@code
+ * leavesToCache}.
*
- * Also, clauses whose cost is {@code skipCacheFactor} times more than the cost of the top-level query
- * will not be cached in order to not slow down queries too much.
+ * <p>Also, clauses whose cost is {@code skipCacheFactor} times more than the cost of the
+ * top-level query will not be cached in order to not slow down queries too much.
*/
- public LRUQueryCache(int maxSize, long maxRamBytesUsed,
- Predicate<LeafReaderContext> leavesToCache, float skipCacheFactor) {
+ public LRUQueryCache(
+ int maxSize,
+ long maxRamBytesUsed,
+ Predicate<LeafReaderContext> leavesToCache,
+ float skipCacheFactor) {
this.maxSize = maxSize;
this.maxRamBytesUsed = maxRamBytesUsed;
this.leavesToCache = leavesToCache;
if (skipCacheFactor >= 1 == false) { // NaN >= 1 evaluates false
- throw new IllegalArgumentException("skipCacheFactor must be no less than 1, get " + skipCacheFactor);
+ throw new IllegalArgumentException(
+ "skipCacheFactor must be no less than 1, get " + skipCacheFactor);
}
this.skipCacheFactor = skipCacheFactor;
@@ -137,17 +136,14 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Create a new instance that will cache at most <code>maxSize</code> queries
- * with at most <code>maxRamBytesUsed</code> bytes of memory. Queries will
- * only be cached on leaves that have more than 10k documents and have more
- * than 3% of the total number of documents in the index.
- * This should guarantee that all leaves from the upper
- * {@link TieredMergePolicy tier} will be cached while ensuring that at most
- * <code>33</code> leaves can make it to the cache (very likely less than 10 in
- * practice), which is useful for this implementation since some operations
- * perform in linear time with the number of cached leaves.
- * Only clauses whose cost is at most 100x the cost of the top-level query will
- * be cached in order to not hurt latency too much because of caching.
+ * Create a new instance that will cache at most <code>maxSize</code> queries with at most <code>
+ * maxRamBytesUsed</code> bytes of memory. Queries will only be cached on leaves that have more
+ * than 10k documents and have more than 3% of the total number of documents in the index. This
+ * should guarantee that all leaves from the upper {@link TieredMergePolicy tier} will be cached
+ * while ensuring that at most <code>33</code> leaves can make it to the cache (very likely less
+ * than 10 in practice), which is useful for this implementation since some operations perform in
+ * linear time with the number of cached leaves. Only clauses whose cost is at most 100x the cost
+ * of the top-level query will be cached in order to not hurt latency too much because of caching.
*/
public LRUQueryCache(int maxSize, long maxRamBytesUsed) {
this(maxSize, maxRamBytesUsed, new MinSegmentSizePredicate(10000, .03f), 250);
@@ -176,9 +172,9 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Expert: callback when there is a cache hit on a given query.
- * Implementing this method is typically useful in order to compute more
- * fine-grained statistics about the query cache.
+ * Expert: callback when there is a cache hit on a given query. Implementing this method is
+ * typically useful in order to compute more fine-grained statistics about the query cache.
+ *
* @see #onMiss
* @lucene.experimental
*/
@@ -189,6 +185,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
/**
* Expert: callback when there is a cache miss on a given query.
+ *
* @see #onHit
* @lucene.experimental
*/
@@ -199,9 +196,9 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Expert: callback when a query is added to this cache.
- * Implementing this method is typically useful in order to compute more
- * fine-grained statistics about the query cache.
+ * Expert: callback when a query is added to this cache. Implementing this method is typically
+ * useful in order to compute more fine-grained statistics about the query cache.
+ *
* @see #onQueryEviction
* @lucene.experimental
*/
@@ -212,6 +209,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
/**
* Expert: callback when a query is evicted from this cache.
+ *
* @see #onQueryCache
* @lucene.experimental
*/
@@ -221,9 +219,9 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Expert: callback when a {@link DocIdSet} is added to this cache.
- * Implementing this method is typically useful in order to compute more
- * fine-grained statistics about the query cache.
+ * Expert: callback when a {@link DocIdSet} is added to this cache. Implementing this method is
+ * typically useful in order to compute more fine-grained statistics about the query cache.
+ *
* @see #onDocIdSetEviction
* @lucene.experimental
*/
@@ -235,8 +233,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Expert: callback when one or more {@link DocIdSet}s are removed from this
- * cache.
+ * Expert: callback when one or more {@link DocIdSet}s are removed from this cache.
+ *
* @see #onDocIdSetCache
* @lucene.experimental
*/
@@ -248,6 +246,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
/**
* Expert: callback when the cache is completely cleared.
+ *
* @lucene.experimental
*/
protected void onClear() {
@@ -334,19 +333,21 @@ public class LRUQueryCache implements QueryCache, Accountable {
if (size == mostRecentlyUsedQueries.size()) {
// size did not decrease, because the hash of the query changed since it has been
// put into the cache
- throw new ConcurrentModificationException("Removal from the cache failed! This " +
- "is probably due to a query which has been modified after having been put into " +
- " the cache or a badly implemented clone(). Query class: [" + query.getClass() +
- "], query: [" + query + "]");
+ throw new ConcurrentModificationException(
+ "Removal from the cache failed! This "
+ + "is probably due to a query which has been modified after having been put into "
+ + " the cache or a badly implemented clone(). Query class: ["
+ + query.getClass()
+ + "], query: ["
+ + query
+ + "]");
}
onEviction(query);
} while (iterator.hasNext() && requiresEviction());
}
}
- /**
- * Remove all cache entries for the given core cache key.
- */
+ /** Remove all cache entries for the given core cache key. */
public void clearCoreCacheKey(Object coreKey) {
lock.lock();
try {
@@ -366,9 +367,7 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
}
- /**
- * Remove all cache entries for the given query.
- */
+ /** Remove all cache entries for the given query. */
public void clearQuery(Query query) {
lock.lock();
try {
@@ -389,14 +388,13 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
}
- /**
- * Clear the content of this cache.
- */
+ /** Clear the content of this cache. */
public void clear() {
lock.lock();
try {
cache.clear();
- // Note that this also clears the uniqueQueries map since mostRecentlyUsedQueries is the uniqueQueries.keySet view:
+ // Note that this also clears the uniqueQueries map since mostRecentlyUsedQueries is the
+ // uniqueQueries.keySet view:
mostRecentlyUsedQueries.clear();
onClear();
} finally {
@@ -409,20 +407,28 @@ public class LRUQueryCache implements QueryCache, Accountable {
lock.lock();
try {
if (requiresEviction()) {
- throw new AssertionError("requires evictions: size=" + mostRecentlyUsedQueries.size()
- + ", maxSize=" + maxSize + ", ramBytesUsed=" + ramBytesUsed() + ", maxRamBytesUsed=" + maxRamBytesUsed);
+ throw new AssertionError(
+ "requires evictions: size="
+ + mostRecentlyUsedQueries.size()
+ + ", maxSize="
+ + maxSize
+ + ", ramBytesUsed="
+ + ramBytesUsed()
+ + ", maxRamBytesUsed="
+ + maxRamBytesUsed);
}
for (LeafCache leafCache : cache.values()) {
Set<Query> keys = Collections.newSetFromMap(new IdentityHashMap<>());
keys.addAll(leafCache.cache.keySet());
keys.removeAll(mostRecentlyUsedQueries);
if (!keys.isEmpty()) {
- throw new AssertionError("One leaf cache contains more keys than the top-level cache: " + keys);
+ throw new AssertionError(
+ "One leaf cache contains more keys than the top-level cache: " + keys);
}
}
long recomputedRamBytesUsed =
- HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size()
- + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * uniqueQueries.size();
+ HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size()
+ + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * uniqueQueries.size();
recomputedRamBytesUsed += mostRecentlyUsedQueries.size() * QUERY_DEFAULT_RAM_BYTES_USED;
for (LeafCache leafCache : cache.values()) {
recomputedRamBytesUsed += HASHTABLE_RAM_BYTES_PER_ENTRY * leafCache.cache.size();
@@ -431,7 +437,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
}
if (recomputedRamBytesUsed != ramBytesUsed) {
- throw new AssertionError("ramBytesUsed mismatch : " + ramBytesUsed + " != " + recomputedRamBytesUsed);
+ throw new AssertionError(
+ "ramBytesUsed mismatch : " + ramBytesUsed + " != " + recomputedRamBytesUsed);
}
long recomputedCacheSize = 0;
@@ -439,7 +446,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
recomputedCacheSize += leafCache.cache.size();
}
if (recomputedCacheSize != getCacheSize()) {
- throw new AssertionError("cacheSize mismatch : " + getCacheSize() + " != " + recomputedCacheSize);
+ throw new AssertionError(
+ "cacheSize mismatch : " + getCacheSize() + " != " + recomputedCacheSize);
}
} finally {
lock.unlock();
@@ -482,9 +490,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Default cache implementation: uses {@link RoaringDocIdSet} for sets that
- * have a density < 1% and a {@link BitDocIdSet} over a {@link FixedBitSet}
- * otherwise.
+ * Default cache implementation: uses {@link RoaringDocIdSet} for sets that have a density &lt; 1%
+ * and a {@link BitDocIdSet} over a {@link FixedBitSet} otherwise.
*/
protected DocIdSet cacheImpl(BulkScorer scorer, int maxDoc) throws IOException {
if (scorer.cost() * 100 >= maxDoc) {
@@ -499,44 +506,47 @@ public class LRUQueryCache implements QueryCache, Accountable {
private static DocIdSet cacheIntoBitSet(BulkScorer scorer, int maxDoc) throws IOException {
final FixedBitSet bitSet = new FixedBitSet(maxDoc);
long cost[] = new long[1];
- scorer.score(new LeafCollector() {
+ scorer.score(
+ new LeafCollector() {
- @Override
- public void setScorer(Scorable scorer) throws IOException {}
-
- @Override
- public void collect(int doc) throws IOException {
- cost[0]++;
- bitSet.set(doc);
- }
+ @Override
+ public void setScorer(Scorable scorer) throws IOException {}
- }, null);
+ @Override
+ public void collect(int doc) throws IOException {
+ cost[0]++;
+ bitSet.set(doc);
+ }
+ },
+ null);
return new BitDocIdSet(bitSet, cost[0]);
}
- private static DocIdSet cacheIntoRoaringDocIdSet(BulkScorer scorer, int maxDoc) throws IOException {
+ private static DocIdSet cacheIntoRoaringDocIdSet(BulkScorer scorer, int maxDoc)
+ throws IOException {
RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(maxDoc);
- scorer.score(new LeafCollector() {
-
- @Override
- public void setScorer(Scorable scorer) throws IOException {}
+ scorer.score(
+ new LeafCollector() {
- @Override
- public void collect(int doc) throws IOException {
- builder.add(doc);
- }
+ @Override
+ public void setScorer(Scorable scorer) throws IOException {}
- }, null);
+ @Override
+ public void collect(int doc) throws IOException {
+ builder.add(doc);
+ }
+ },
+ null);
return builder.build();
}
/**
- * Return the total number of times that a {@link Query} has been looked up
- * in this {@link QueryCache}. Note that this number is incremented once per
- * segment so running a cached query only once will increment this counter
- * by the number of segments that are wrapped by the searcher.
- * Note that by definition, {@link #getTotalCount()} is the sum of
- * {@link #getHitCount()} and {@link #getMissCount()}.
+ * Return the total number of times that a {@link Query} has been looked up in this {@link
+ * QueryCache}. Note that this number is incremented once per segment so running a cached query
+ * only once will increment this counter by the number of segments that are wrapped by the
+ * searcher. Note that by definition, {@link #getTotalCount()} is the sum of {@link
+ * #getHitCount()} and {@link #getMissCount()}.
+ *
* @see #getHitCount()
* @see #getMissCount()
*/
@@ -545,9 +555,9 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Over the {@link #getTotalCount() total} number of times that a query has
- * been looked up, return how many times a cached {@link DocIdSet} has been
- * found and returned.
+ * Over the {@link #getTotalCount() total} number of times that a query has been looked up, return
+ * how many times a cached {@link DocIdSet} has been found and returned.
+ *
* @see #getTotalCount()
* @see #getMissCount()
*/
@@ -556,9 +566,9 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Over the {@link #getTotalCount() total} number of times that a query has
- * been looked up, return how many times this query was not contained in the
- * cache.
+ * Over the {@link #getTotalCount() total} number of times that a query has been looked up, return
+ * how many times this query was not contained in the cache.
+ *
* @see #getTotalCount()
* @see #getHitCount()
*/
@@ -567,8 +577,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Return the total number of {@link DocIdSet}s which are currently stored
- * in the cache.
+ * Return the total number of {@link DocIdSet}s which are currently stored in the cache.
+ *
* @see #getCacheCount()
* @see #getEvictionCount()
*/
@@ -577,11 +587,11 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Return the total number of cache entries that have been generated and put
- * in the cache. It is highly desirable to have a {@link #getHitCount() hit
- * count} that is much higher than the {@link #getCacheCount() cache count}
- * as the opposite would indicate that the query cache makes efforts in order
- * to cache queries but then they do not get reused.
+ * Return the total number of cache entries that have been generated and put in the cache. It is
+ * highly desirable to have a {@link #getHitCount() hit count} that is much higher than the {@link
+ * #getCacheCount() cache count} as the opposite would indicate that the query cache makes efforts
+ * in order to cache queries but then they do not get reused.
+ *
* @see #getCacheSize()
* @see #getEvictionCount()
*/
@@ -590,12 +600,12 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
/**
- * Return the number of cache entries that have been removed from the cache
- * either in order to stay under the maximum configured size/ram usage, or
- * because a segment has been closed. High numbers of evictions might mean
- * that queries are not reused or that the {@link QueryCachingPolicy
- * caching policy} caches too aggressively on NRT segments which get merged
+ * Return the number of cache entries that have been removed from the cache either in order to
+ * stay under the maximum configured size/ram usage, or because a segment has been closed. High
+ * numbers of evictions might mean that queries are not reused or that the {@link
+ * QueryCachingPolicy caching policy} caches too aggressively on NRT segments which get merged
* early.
+ *
* @see #getCacheCount()
* @see #getCacheSize()
*/
@@ -654,7 +664,6 @@ public class LRUQueryCache implements QueryCache, Accountable {
public long ramBytesUsed() {
return ramBytesUsed;
}
-
}
private class CachingWrapperWeight extends ConstantScoreWeight {
@@ -700,7 +709,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
/** Check whether this segment is eligible for caching, regardless of the query. */
private boolean shouldCache(LeafReaderContext context) throws IOException {
- return cacheEntryHasReasonableWorstCaseSize(ReaderUtil.getTopLevelContext(context).reader().maxDoc())
+ return cacheEntryHasReasonableWorstCaseSize(
+ ReaderUtil.getTopLevelContext(context).reader().maxDoc())
&& leavesToCache.test(context);
}
@@ -757,15 +767,18 @@ public class LRUQueryCache implements QueryCache, Accountable {
}
Scorer scorer = supplier.get(Long.MAX_VALUE);
- DocIdSet docIdSet = cacheImpl(new DefaultBulkScorer(scorer), context.reader().maxDoc());
+ DocIdSet docIdSet =
+ cacheImpl(new DefaultBulkScorer(scorer), context.reader().maxDoc());
putIfAbsent(in.getQuery(), docIdSet, cacheHelper);
DocIdSetIterator disi = docIdSet.iterator();
if (disi == null) {
- // docIdSet.iterator() is allowed to return null when empty but we want a non-null iterator here
+ // docIdSet.iterator() is allowed to return null when empty but we want a non-null
+ // iterator here
disi = DocIdSetIterator.empty();
}
- return new ConstantScoreScorer(CachingWrapperWeight.this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi);
+ return new ConstantScoreScorer(
+ CachingWrapperWeight.this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi);
}
@Override
@@ -790,7 +803,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
return new ScorerSupplier() {
@Override
public Scorer get(long LeadCost) throws IOException {
- return new ConstantScoreScorer(CachingWrapperWeight.this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi);
+ return new ConstantScoreScorer(
+ CachingWrapperWeight.this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi);
}
@Override
@@ -798,7 +812,6 @@ public class LRUQueryCache implements QueryCache, Accountable {
return disi.cost();
}
};
-
}
@Override
@@ -868,8 +881,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
return null;
}
- return new DefaultBulkScorer(new ConstantScoreScorer(this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi));
+ return new DefaultBulkScorer(
+ new ConstantScoreScorer(this, 0f, ScoreMode.COMPLETE_NO_SCORES, disi));
}
-
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java
index 82f78ee..983e5bb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LeafCollector.java
@@ -16,28 +16,20 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
- * <p>Collector decouples the score from the collected doc:
- * the score computation is skipped entirely if it's not
- * needed. Collectors that do need the score should
- * implement the {@link #setScorer} method, to hold onto the
- * passed {@link Scorer} instance, and call {@link
- * Scorer#score()} within the collect method to compute the
- * current hit's score. If your collector may request the
- * score for a single hit multiple times, you should use
- * {@link ScoreCachingWrappingScorer}. </p>
- *
- * <p><b>NOTE:</b> The doc that is passed to the collect
- * method is relative to the current reader. If your
- * collector needs to resolve this to the docID space of the
- * Multi*Reader, you must re-base it by recording the
- * docBase from the most recent setNextReader call. Here's
- * a simple example showing how to collect docIDs into a
- * BitSet:</p>
- *
+ * Collector decouples the score from the collected doc: the score computation is skipped entirely
+ * if it's not needed. Collectors that do need the score should implement the {@link #setScorer}
+ * method, to hold onto the passed {@link Scorer} instance, and call {@link Scorer#score()} within
+ * the collect method to compute the current hit's score. If your collector may request the score
+ * for a single hit multiple times, you should use {@link ScoreCachingWrappingScorer}.
+ *
+ * <p><b>NOTE:</b> The doc that is passed to the collect method is relative to the current reader.
+ * If your collector needs to resolve this to the docID space of the Multi*Reader, you must re-base
+ * it by recording the docBase from the most recent setNextReader call. Here's a simple example
+ * showing how to collect docIDs into a BitSet:
+ *
* <pre class="prettyprint">
* IndexSearcher searcher = new IndexSearcher(indexReader);
* final BitSet bits = new BitSet(indexReader.maxDoc());
@@ -62,47 +54,45 @@ import java.io.IOException;
* });
* </pre>
*
- * <p>Not all collectors will need to rebase the docID. For
- * example, a collector that simply counts the total number
- * of hits would skip it.</p>
+ * <p>Not all collectors will need to rebase the docID. For example, a collector that simply counts
+ * the total number of hits would skip it.
*
* @lucene.experimental
*/
public interface LeafCollector {
/**
- * Called before successive calls to {@link #collect(int)}. Implementations
- * that need the score of the current document (passed-in to
- * {@link #collect(int)}), should save the passed-in Scorer and call
- * scorer.score() when needed.
+ * Called before successive calls to {@link #collect(int)}. Implementations that need the score of
+ * the current document (passed-in to {@link #collect(int)}), should save the passed-in Scorer and
+ * call scorer.score() when needed.
*/
void setScorer(Scorable scorer) throws IOException;
-
+
/**
- * Called once for every document matching a query, with the unbased document
- * number.
- * <p>Note: The collection of the current segment can be terminated by throwing
- * a {@link CollectionTerminatedException}. In this case, the last docs of the
- * current {@link org.apache.lucene.index.LeafReaderContext} will be skipped and {@link IndexSearcher}
- * will swallow the exception and continue collection with the next leaf.
- * <p>
- * Note: This is called in an inner search loop. For good search performance,
- * implementations of this method should not call {@link IndexSearcher#doc(int)} or
- * {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
- * Doing so can slow searches by an order of magnitude or more.
+ * Called once for every document matching a query, with the unbased document number.
+ *
+ * <p>Note: The collection of the current segment can be terminated by throwing a {@link
+ * CollectionTerminatedException}. In this case, the last docs of the current {@link
+ * org.apache.lucene.index.LeafReaderContext} will be skipped and {@link IndexSearcher} will
+ * swallow the exception and continue collection with the next leaf.
+ *
+ * <p>Note: This is called in an inner search loop. For good search performance, implementations
+ * of this method should not call {@link IndexSearcher#doc(int)} or {@link
+ * org.apache.lucene.index.IndexReader#document(int)} on every hit. Doing so can slow searches by
+ * an order of magnitude or more.
*/
void collect(int doc) throws IOException;
/**
* Optionally returns an iterator over competitive documents.
*
- * Collectors should delegate this method to their comparators if
- * their comparators provide the skipping functionality over non-competitive docs.
+ * <p>Collectors should delegate this method to their comparators if their comparators provide the
+ * skipping functionality over non-competitive docs.
*
- * The default is to return {@code null} which is interpreted as the collector provide any competitive iterator.
+ * <p>The default is to return {@code null}, which is interpreted as the collector not providing
+ * any competitive iterator.
*/
default DocIdSetIterator competitiveIterator() throws IOException {
return null;
}
-
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafFieldComparator.java b/lucene/core/src/java/org/apache/lucene/search/LeafFieldComparator.java
index 6a93658..594d882 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LeafFieldComparator.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LeafFieldComparator.java
@@ -16,36 +16,25 @@
*/
package org.apache.lucene.search;
-
import java.io.IOException;
/**
- * Expert: comparator that gets instantiated on each leaf
- * from a top-level {@link FieldComparator} instance.
+ * Expert: comparator that gets instantiated on each leaf from a top-level {@link FieldComparator}
+ * instance.
*
- * <p>A leaf comparator must define these functions:</p>
+ * <p>A leaf comparator must define these functions:
*
* <ul>
- *
- * <li> {@link #setBottom} This method is called by
- * {@link FieldValueHitQueue} to notify the
- * FieldComparator of the current weakest ("bottom")
- * slot. Note that this slot may not hold the weakest
- * value according to your comparator, in cases where
- * your comparator is not the primary one (ie, is only
- * used to break ties from the comparators before it).
- *
- * <li> {@link #compareBottom} Compare a new hit (docID)
- * against the "weakest" (bottom) entry in the queue.
- *
- * <li> {@link #compareTop} Compare a new hit (docID)
- * against the top value previously set by a call to
- * {@link FieldComparator#setTopValue}.
- *
- * <li> {@link #copy} Installs a new hit into the
- * priority queue. The {@link FieldValueHitQueue}
+ * <li>{@link #setBottom} This method is called by {@link FieldValueHitQueue} to notify the
+ * FieldComparator of the current weakest ("bottom") slot. Note that this slot may not hold
+ * the weakest value according to your comparator, in cases where your comparator is not the
+ * primary one (ie, is only used to break ties from the comparators before it).
+ * <li>{@link #compareBottom} Compare a new hit (docID) against the "weakest" (bottom) entry in
+ * the queue.
+ * <li>{@link #compareTop} Compare a new hit (docID) against the top value previously set by a
+ * call to {@link FieldComparator#setTopValue}.
+ * <li>{@link #copy} Installs a new hit into the priority queue. The {@link FieldValueHitQueue}
* calls this method when a new hit is competitive.
- *
* </ul>
*
* @see FieldComparator
@@ -54,82 +43,72 @@ import java.io.IOException;
public interface LeafFieldComparator {
/**
- * Set the bottom slot, ie the "weakest" (sorted last)
- * entry in the queue. When {@link #compareBottom} is
- * called, you should compare against this slot. This
- * will always be called before {@link #compareBottom}.
- *
+ * Set the bottom slot, ie the "weakest" (sorted last) entry in the queue. When {@link
+ * #compareBottom} is called, you should compare against this slot. This will always be called
+ * before {@link #compareBottom}.
+ *
* @param slot the currently weakest (sorted last) slot in the queue
*/
void setBottom(final int slot) throws IOException;
/**
- * Compare the bottom of the queue with this doc. This will
- * only invoked after setBottom has been called. This
- * should return the same result as {@link
- * FieldComparator#compare(int,int)}} as if bottom were slot1 and the new
- * document were slot 2.
- *
- * <p>For a search that hits many results, this method
- * will be the hotspot (invoked by far the most
- * frequently).</p>
- *
+ * Compare the bottom of the queue with this doc. This will only be invoked after setBottom has
+ * been called. This should return the same result as {@link FieldComparator#compare(int,int)} as if
+ * bottom were slot1 and the new document were slot 2.
+ *
+ * <p>For a search that hits many results, this method will be the hotspot (invoked by far the
+ * most frequently).
+ *
* @param doc that was hit
- * @return any {@code N < 0} if the doc's value is sorted after
- * the bottom entry (not competitive), any {@code N > 0} if the
- * doc's value is sorted before the bottom entry and {@code 0} if
- * they are equal.
+ * @return any {@code N < 0} if the doc's value is sorted after the bottom entry (not
+ * competitive), any {@code N > 0} if the doc's value is sorted before the bottom entry and
+ * {@code 0} if they are equal.
*/
int compareBottom(int doc) throws IOException;
/**
- * Compare the top value with this doc. This will
- * only invoked after setTopValue has been called. This
- * should return the same result as {@link
- * FieldComparator#compare(int,int)}} as if topValue were slot1 and the new
- * document were slot 2. This is only called for searches that
- * use searchAfter (deep paging).
- *
+ * Compare the top value with this doc. This will only be invoked after setTopValue has been
+ * called. This should return the same result as {@link FieldComparator#compare(int,int)} as if topValue
+ * were slot1 and the new document were slot 2. This is only called for searches that use
+ * searchAfter (deep paging).
+ *
* @param doc that was hit
- * @return any {@code N < 0} if the doc's value is sorted after
- * the top entry (not competitive), any {@code N > 0} if the
- * doc's value is sorted before the top entry and {@code 0} if
- * they are equal.
+ * @return any {@code N < 0} if the doc's value is sorted after the top entry (not competitive),
+ * any {@code N > 0} if the doc's value is sorted before the top entry and {@code 0} if they
+ * are equal.
*/
int compareTop(int doc) throws IOException;
/**
- * This method is called when a new hit is competitive.
- * You should copy any state associated with this document
- * that will be required for future comparisons, into the
- * specified slot.
- *
+ * This method is called when a new hit is competitive. You should copy any state associated with
+ * this document that will be required for future comparisons, into the specified slot.
+ *
* @param slot which slot to copy the hit to
* @param doc docID relative to current reader
*/
void copy(int slot, int doc) throws IOException;
- /** Sets the Scorer to use in case a document's score is
- * needed.
- *
- * @param scorer Scorer instance that you should use to
- * obtain the current hit's score, if necessary. */
+ /**
+ * Sets the Scorer to use in case a document's score is needed.
+ *
+ * @param scorer Scorer instance that you should use to obtain the current hit's score, if
+ * necessary.
+ */
void setScorer(Scorable scorer) throws IOException;
/**
* Returns a competitive iterator
- * @return an iterator over competitive docs that are stronger than already collected docs
- * or {@code null} if such an iterator is not available for the current comparator or segment.
+ *
+ * @return an iterator over competitive docs that are stronger than already collected docs or
+ * {@code null} if such an iterator is not available for the current comparator or segment.
*/
default DocIdSetIterator competitiveIterator() throws IOException {
return null;
}
/**
- * Informs this leaf comparator that hits threshold is reached.
- * This method is called from a collector when hits threshold is reached.
+ * Informs this leaf comparator that hits threshold is reached. This method is called from a
+ * collector when hits threshold is reached.
*/
- default void setHitsThresholdReached() throws IOException{
- }
-
+ default void setHitsThresholdReached() throws IOException {}
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/LeafSimScorer.java b/lucene/core/src/java/org/apache/lucene/search/LeafSimScorer.java
index b65d538..5d8d2e9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/LeafSimScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/LeafSimScorer.java
@@ -18,23 +18,19 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Objects;
-
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
-/**
- * {@link SimScorer} on a specific {@link LeafReader}.
- */
+/** {@link SimScorer} on a specific {@link LeafReader}. */
public final class LeafSimScorer {
private final SimScorer scorer;
private final NumericDocValues norms;
- /**
- * Sole constructor: Score documents of {@code reader} with {@code scorer}.
- */
- public LeafSimScorer(SimScorer scorer, LeafReader reader, String field, boolean needsScores) throws IOException {
+ /** Sole constructor: Score documents of {@code reader} with {@code scorer}. */
+ public LeafSimScorer(SimScorer scorer, LeafReader reader, String field, boolean needsScores)
+ throws IOException {
this.scorer = Objects.requireNonNull(scorer);
norms = needsScores ? reader.getNormValues(field) : null;
}
@@ -54,18 +50,23 @@ public final class LeafSimScorer {
}
}
- /** Score the provided document assuming the given term document frequency.
- * This method must be called on non-decreasing sequences of doc ids.
- * @see SimScorer#score(float, long) */
+ /**
+ * Score the provided document assuming the given term document frequency. This method must be
+ * called on non-decreasing sequences of doc ids.
+ *
+ * @see SimScorer#score(float, long)
+ */
public float score(int doc, float freq) throws IOException {
return scorer.score(freq, getNormValue(doc));
}
- /** Explain the score for the provided document assuming the given term document frequency.
- * This method must be called on non-decreasing sequences of doc ids.
- * @see SimScorer#explain(Explanation, long) */
+ /**
+ * Explain the score for the provided document assuming the given term document frequency. This
+ * method must be called on non-decreasing sequences of doc ids.
+ *
... 61855 lines suppressed ...