You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2021/03/23 16:46:52 UTC
[lucene] branch main updated: Revert "LUCENE-9856: fail precommit
on unused local variables (#34)"
This is an automated email from the ASF dual-hosted git repository.
rmuir pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/lucene.git
The following commit(s) were added to refs/heads/main by this push:
new e6c4956 Revert "LUCENE-9856: fail precommit on unused local variables (#34)"
e6c4956 is described below
commit e6c4956cf69b7d2b47297f589a6855fa3028396a
Author: Robert Muir <rm...@apache.org>
AuthorDate: Tue Mar 23 12:46:36 2021 -0400
Revert "LUCENE-9856: fail precommit on unused local variables (#34)"
This reverts commit 20dba278bbfc4fec8b53c8371eae982e3fa24b39.
---
build.gradle | 2 +-
gradle/generation/javacc.gradle | 15 --
.../dot.settings/org.eclipse.jdt.core.prefs | 2 -
gradle/validation/ecj-lint/ecj.javadocs.prefs | 53 ++-----
.../analysis/charfilter/HTMLStripCharFilter.java | 2 +-
.../analysis/charfilter/HTMLStripCharFilter.jflex | 2 +-
.../analysis/classic/ClassicTokenizerImpl.java | 2 +-
.../analysis/classic/ClassicTokenizerImpl.jflex | 2 +-
.../compound/hyphenation/PatternParser.java | 21 +++
.../analysis/email/UAX29URLEmailTokenizerImpl.java | 2 +-
.../email/UAX29URLEmailTokenizerImpl.jflex | 2 +-
.../org/apache/lucene/analysis/en/KStemmer.java | 4 +
.../hunspell/HunspellStemFilterFactory.java | 2 +
.../miscellaneous/ConcatenateGraphFilter.java | 2 +-
.../miscellaneous/FixBrokenOffsetsFilter.java | 3 +
.../miscellaneous/ProtectedTermFilterFactory.java | 4 +-
.../lucene/analysis/payloads/FloatEncoder.java | 2 +-
.../lucene/analysis/payloads/IdentityEncoder.java | 2 +-
.../lucene/analysis/payloads/IntegerEncoder.java | 2 +-
.../analysis/wikipedia/WikipediaTokenizerImpl.java | 2 +-
.../wikipedia/WikipediaTokenizerImpl.jflex | 2 +-
.../charfilter/TestHTMLStripCharFilter.java | 3 +-
.../analysis/cjk/TestCJKBigramFilterFactory.java | 11 +-
.../miscellaneous/TestWordDelimiterFilter.java | 12 ++
.../TestWordDelimiterGraphFilter.java | 10 ++
.../lucene/analysis/ngram/TestNGramTokenizer.java | 2 +-
.../pattern/TestSimplePatternSplitTokenizer.java | 12 +-
.../analysis/synonym/TestSynonymMapFilter.java | 8 +
.../wikipedia/TestWikipediaTokenizerFactory.java | 3 +-
.../icu/tokenattributes/ScriptAttributeImpl.java | 2 +-
.../ja/tokenattributes/BaseFormAttributeImpl.java | 2 +-
.../tokenattributes/InflectionAttributeImpl.java | 3 +-
.../tokenattributes/PartOfSpeechAttributeImpl.java | 3 +-
.../ja/tokenattributes/ReadingAttributeImpl.java | 2 +-
.../MorphosyntacticTagsAttributeImpl.java | 2 +-
.../tokenattributes/PartOfSpeechAttributeImpl.java | 3 +-
.../ko/tokenattributes/ReadingAttributeImpl.java | 2 +-
.../lucene/analysis/opennlp/OpenNLPTokenizer.java | 2 +
.../opennlp/TestOpenNLPTokenizerFactory.java | 14 +-
.../analysis/phonetic/DoubleMetaphoneFilter.java | 2 +
.../analysis/cn/smart/hhmm/BigramDictionary.java | 7 +-
.../stempel/src/java/org/egothor/stemmer/Trie.java | 3 +-
.../Lucene50CompressingTermVectorsReader.java | 4 +-
.../lucene60/Lucene60PointsReader.java | 3 +-
.../lucene70/Lucene70DocValuesConsumer.java | 3 +-
.../lucene70/Lucene70DocValuesProducer.java | 3 +-
.../lucene70/Lucene70NormsProducer.java | 14 ++
.../lucene70/Lucene70SegmentInfoFormat.java | 17 ++-
.../lucene80/Lucene80DocValuesConsumer.java | 2 +-
.../lucene80/Lucene80DocValuesProducer.java | 3 +-
.../lucene60/Lucene60PointsWriter.java | 3 +-
.../backward_codecs/lucene80/TestIndexedDISI.java | 5 +-
.../benchmark/byTask/feeds/EnwikiQueryMaker.java | 2 +-
.../byTask/feeds/FileBasedQueryMaker.java | 2 +-
.../benchmark/byTask/feeds/ReutersQueryMaker.java | 2 +-
.../benchmark/byTask/feeds/SimpleQueryMaker.java | 2 +-
.../benchmark/byTask/tasks/ReadTokensTask.java | 6 +
.../byTask/tasks/SearchTravRetHighlightTask.java | 1 -
.../Test20NewsgroupsClassification.java | 10 +-
.../lucene/codecs/blockterms/BlockTermsWriter.java | 3 +-
.../lucene/codecs/memory/DirectPostingsFormat.java | 20 +++
.../lucene/codecs/memory/FSTTermsReader.java | 2 +-
.../codecs/simpletext/SimpleTextBKDReader.java | 54 +++++++
.../codecs/simpletext/SimpleTextBKDWriter.java | 34 +++++
.../codecs/simpletext/SimpleTextPointsWriter.java | 8 +
.../codecs/simpletext/SimpleTextVectorWriter.java | 4 +-
.../analysis/standard/StandardTokenizerImpl.java | 2 +-
.../analysis/standard/StandardTokenizerImpl.jflex | 2 +-
.../tokenattributes/BytesTermAttributeImpl.java | 3 +-
.../tokenattributes/CharTermAttributeImpl.java | 2 +-
.../tokenattributes/FlagsAttributeImpl.java | 2 +-
.../tokenattributes/OffsetAttributeImpl.java | 2 +-
.../tokenattributes/PayloadAttributeImpl.java | 2 +-
.../PositionIncrementAttributeImpl.java | 2 +-
.../PositionLengthAttributeImpl.java | 3 +-
.../TermFrequencyAttributeImpl.java | 3 +-
.../tokenattributes/TypeAttributeImpl.java | 2 +-
.../org/apache/lucene/codecs/VectorWriter.java | 2 +
.../codecs/lucene86/Lucene86PointsReader.java | 3 +-
.../codecs/lucene86/Lucene86PointsWriter.java | 3 +-
.../codecs/lucene86/Lucene86SegmentInfoFormat.java | 7 +-
.../codecs/lucene90/Lucene90DocValuesConsumer.java | 2 +-
.../codecs/lucene90/Lucene90DocValuesProducer.java | 3 +-
.../codecs/lucene90/Lucene90FieldInfosFormat.java | 15 +-
.../codecs/lucene90/Lucene90VectorReader.java | 3 +
.../blocktree/IntersectTermsEnumFrame.java | 2 +
.../blocktree/Lucene90BlockTreeTermsReader.java | 7 +
.../lucene90/blocktree/SegmentTermsEnumFrame.java | 2 +
.../Lucene90CompressingTermVectorsReader.java | 4 +-
.../java/org/apache/lucene/index/CheckIndex.java | 169 +++++++++++++++++++++
.../java/org/apache/lucene/index/OrdinalMap.java | 1 +
.../org/apache/lucene/search/spans/SpanScorer.java | 6 +-
.../java/org/apache/lucene/store/IndexInput.java | 2 +-
.../java/org/apache/lucene/util/FixedBitSet.java | 2 +-
.../java/org/apache/lucene/util/OfflineSorter.java | 2 +
.../org/apache/lucene/util/SparseFixedBitSet.java | 2 +-
.../src/java/org/apache/lucene/util/fst/FST.java | 2 +
.../org/apache/lucene/util/hnsw/HnswGraph.java | 4 +-
.../apache/lucene/util/hnsw/HnswGraphBuilder.java | 2 +-
.../org/apache/lucene/util/hnsw/NeighborArray.java | 1 +
.../apache/lucene/util/packed/gen_BulkOperation.py | 7 +-
.../org/apache/lucene/analysis/TestStopFilter.java | 3 +-
.../lucene/codecs/lucene90/TestIndexedDISI.java | 5 +-
.../lucene/index/Test2BSortedDocValuesOrds.java | 1 +
.../test/org/apache/lucene/index/TestCodecs.java | 4 +
.../lucene/index/TestDemoParallelLeafReader.java | 8 +-
.../src/test/org/apache/lucene/index/TestDoc.java | 2 +-
.../org/apache/lucene/index/TestIndexSorting.java | 4 +
.../org/apache/lucene/index/TestIndexWriter.java | 1 +
.../apache/lucene/index/TestIndexWriterCommit.java | 2 +
.../lucene/index/TestIndexWriterExceptions.java | 2 +-
.../lucene/index/TestIndexWriterOnDiskFull.java | 3 +
.../apache/lucene/index/TestIndexWriterReader.java | 2 +
.../index/TestIndexWriterThreadsToSegments.java | 2 +
.../lucene/index/TestIndexWriterWithThreads.java | 2 +
.../lucene/index/TestNumericDocValuesUpdates.java | 7 +
.../test/org/apache/lucene/index/TestPayloads.java | 5 +
.../apache/lucene/index/TestStressIndexing.java | 8 +-
.../src/test/org/apache/lucene/index/TestTerm.java | 1 -
.../apache/lucene/index/TestTermVectorsReader.java | 1 +
.../apache/lucene/index/TestTermsHashPerField.java | 2 +-
.../lucene/index/TestTwoPhaseCommitTool.java | 4 +
.../org/apache/lucene/search/TestBoolean2.java | 3 +
.../search/TestControlledRealTimeReopenThread.java | 2 +-
.../apache/lucene/search/TestLongValuesSource.java | 1 +
.../apache/lucene/search/TestMatchesIterator.java | 2 +
.../lucene/search/TestSortedNumericSortField.java | 1 -
.../lucene/search/TestSortedSetSortField.java | 1 -
.../org/apache/lucene/search/TestTermQuery.java | 3 +
.../org/apache/lucene/search/TestTotalHits.java | 1 -
.../org/apache/lucene/search/TestWildcard.java | 1 -
.../org/apache/lucene/store/TestMultiMMap.java | 2 +-
.../lucene/store/TestNRTCachingDirectory.java | 3 +-
.../test/org/apache/lucene/util/bkd/TestBKD.java | 2 +
.../test/org/apache/lucene/util/fst/TestFSTs.java | 19 +--
.../test/org/apache/lucene/util/fst/TestUtil.java | 23 +++
.../lucene/expressions/ExpressionRescorer.java | 16 ++
.../lucene/expressions/js/JavascriptCompiler.java | 2 +-
.../expressions/TestExpressionValueSource.java | 1 -
.../org/apache/lucene/facet/FacetsCollector.java | 3 +-
.../java/org/apache/lucene/facet/FacetsConfig.java | 8 +
.../apache/lucene/facet/LongValueFacetCounts.java | 6 +-
.../lucene/facet/taxonomy/TaxonomyFacetLabels.java | 4 +
.../lucene/facet/TestLongValueFacetCounts.java | 3 +
.../lucene/facet/taxonomy/TestFacetLabel.java | 1 -
.../taxonomy/TestSearcherTaxonomyManager.java | 15 ++
.../directory/TestDirectoryTaxonomyReader.java | 3 +
.../grouping/TestAllGroupHeadsCollector.java | 1 +
.../lucene/search/grouping/TestGrouping.java | 1 +
.../apache/lucene/search/highlight/TokenGroup.java | 3 +-
.../uhighlight/TestUnifiedHighlighterMTQ.java | 2 +-
.../apache/lucene/search/join/TestJoinUtil.java | 8 +-
.../desktop/components/AnalysisPanelProvider.java | 2 +
.../dialog/menubar/OptimizeIndexDialogFactory.java | 5 +
.../luke/models/documents/TestDocumentsImpl.java | 18 +--
.../apache/lucene/index/memory/MemoryIndex.java | 9 +-
.../memory/TestMemoryIndexAgainstDirectory.java | 11 ++
.../apache/lucene/misc/util/fst/TestFSTsMisc.java | 1 +
.../lucene/monitor/TestBooleanTermExtractor.java | 1 +
.../function/valuesource/EnumFieldSource.java | 2 +
.../lucene/queries/intervals/IntervalQuery.java | 6 +-
.../apache/lucene/queries/mlt/MoreLikeThis.java | 17 ++-
.../lucene/queryparser/classic/QueryParser.java | 2 +-
.../classic/QueryParserTokenManager.java | 2 +-
.../lucene/queryparser/flexible/messages/NLS.java | 5 +-
.../flexible/standard/parser/ParseException.java | 52 +++----
.../standard/parser/StandardSyntaxParser.java | 2 +-
.../parser/StandardSyntaxParserTokenManager.java | 2 +-
.../queryparser/surround/parser/QueryParser.java | 2 +-
.../surround/parser/QueryParserTokenManager.java | 2 +-
.../apache/lucene/queryparser/xml/CoreParser.java | 2 +-
.../flexible/standard/TestMultiFieldQPHelper.java | 1 +
.../flexible/standard/TestQPHelper.java | 12 ++
.../apache/lucene/replicator/nrt/ReplicaNode.java | 2 +
.../TestIndexAndTaxonomyReplicationClient.java | 3 +-
.../lucene/replicator/nrt/SimplePrimaryNode.java | 3 +
.../lucene/replicator/nrt/SimpleReplicaNode.java | 2 +-
.../lucene/replicator/nrt/TestNRTReplication.java | 2 +
.../replicator/nrt/TestStressNRTReplication.java | 9 ++
.../lucene/sandbox/search/TermAutomatonQuery.java | 6 +-
.../lucene/sandbox/search/TermAutomatonScorer.java | 12 +-
.../search/TokenStreamToTermAutomatonQuery.java | 1 +
.../idversion/TestIDVersionPostingsFormat.java | 6 +
.../sandbox/search/TestTermAutomatonQuery.java | 2 +-
.../spatial/prefix/tree/DateRangePrefixTree.java | 1 +
.../org/apache/lucene/spatial/SpatialTestCase.java | 5 +
.../lucene/spatial/bbox/TestBBoxStrategy.java | 3 +-
.../prefix/TestRandomSpatialOpFuzzyPrefixTree.java | 2 +
.../prefix/TestTermQueryPrefixGridStrategy.java | 3 +-
.../prefix/tree/TestDateRangePrefixTree.java | 2 +
.../TestGeo3dShapeWGS84ModelRectRelation.java | 2 +
.../lucene/spatial3d/geom/GeoComplexPolygon.java | 10 ++
.../geom/GeoCompositeMembershipShape.java | 4 +-
.../lucene/spatial3d/geom/GeoDegeneratePath.java | 49 ++++++
.../lucene/spatial3d/geom/GeoMembershipShape.java | 2 +-
.../lucene/spatial3d/geom/GeoPolygonFactory.java | 4 +
.../lucene/spatial3d/geom/GeoStandardPath.java | 27 ++++
.../apache/lucene/spatial3d/geom/XYZBounds.java | 7 +
.../apache/lucene/spatial3d/TestGeo3DPoint.java | 8 +-
.../apache/lucene/spatial3d/geom/TestGeoBBox.java | 1 +
.../lucene/spatial3d/geom/TestGeoExactCircle.java | 24 ++-
.../apache/lucene/spatial3d/geom/TestGeoPath.java | 21 ++-
.../lucene/spatial3d/geom/TestGeoPolygon.java | 53 ++++---
.../spatial3d/geom/TestRandomGeoPolygon.java | 16 +-
.../suggest/analyzing/AnalyzingSuggester.java | 3 +-
.../suggest/analyzing/FreeTextSuggester.java | 3 +-
.../search/suggest/document/NRTSuggester.java | 2 +-
.../search/suggest/fst/FSTCompletionLookup.java | 2 +-
.../search/suggest/fst/WFSTCompletionLookup.java | 3 +-
.../search/suggest/jaspell/JaspellLookup.java | 3 +-
.../apache/lucene/analysis/CollationTestBase.java | 19 +++
.../codecs/asserting/AssertingDocValuesFormat.java | 6 +-
.../codecs/asserting/AssertingNormsFormat.java | 2 +-
.../lucene/index/BasePostingsFormatTestCase.java | 1 +
.../index/BaseTermVectorsFormatTestCase.java | 1 +
.../java/org/apache/lucene/index/RandomCodec.java | 22 +++
.../apache/lucene/index/RandomPostingsTester.java | 1 +
.../org/apache/lucene/util/LuceneTestCase.java | 23 +++
.../org/apache/lucene/util/TestExpectThrows.java | 37 ++---
219 files changed, 1180 insertions(+), 329 deletions(-)
diff --git a/build.gradle b/build.gradle
index 8a335a5..31d390e 100644
--- a/build.gradle
+++ b/build.gradle
@@ -86,7 +86,7 @@ ext {
scriptDepVersions = [
"apache-rat": "0.11",
"commons-codec": "1.13",
- "ecj": "3.25.0",
+ "ecj": "3.19.0",
"javacc": "7.0.4",
"jflex": "1.7.0",
"jgit": "5.9.0.202009080501-r",
diff --git a/gradle/generation/javacc.gradle b/gradle/generation/javacc.gradle
index c60c1ce..df1c7fa 100644
--- a/gradle/generation/javacc.gradle
+++ b/gradle/generation/javacc.gradle
@@ -95,12 +95,6 @@ def commonCleanups = { FileTree generatedFiles ->
text = text.replace(
"public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }",
"// (setDebugStream omitted).")
- text = text.replace(
- "public class QueryParserTokenManager ",
- '@SuppressWarnings("unused") public class QueryParserTokenManager ')
- text = text.replace(
- "public class StandardSyntaxParserTokenManager ",
- '@SuppressWarnings("unused") public class StandardSyntaxParserTokenManager ')
return text
})
}
@@ -129,9 +123,6 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"final private LookaheadSuccess jj_ls =",
"static final private LookaheadSuccess jj_ls =")
- text = text.replace(
- "public class QueryParser ",
- '@SuppressWarnings("unused") public class QueryParser ')
return text
})
}
@@ -154,9 +145,6 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"new java.util.ArrayList<int[]>",
"new java.util.ArrayList<>")
- text = text.replace(
- "public class QueryParser ",
- '@SuppressWarnings("unused") public class QueryParser ')
return text
})
}
@@ -233,9 +221,6 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"Collections.<QueryNode> singletonList",
"Collections.singletonList")
- text = text.replace(
- "public class StandardSyntaxParser ",
- '@SuppressWarnings("unused") public class StandardSyntaxParser ')
return text
})
}
diff --git a/gradle/ide/eclipse/dot.settings/org.eclipse.jdt.core.prefs b/gradle/ide/eclipse/dot.settings/org.eclipse.jdt.core.prefs
index a775421..8dba526 100644
--- a/gradle/ide/eclipse/dot.settings/org.eclipse.jdt.core.prefs
+++ b/gradle/ide/eclipse/dot.settings/org.eclipse.jdt.core.prefs
@@ -3,7 +3,6 @@ eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=11
org.eclipse.jdt.core.compiler.compliance=11
org.eclipse.jdt.core.compiler.doc.comment.support=enabled
-org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=enabled
org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.comparingIdentical=error
@@ -33,7 +32,6 @@ org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error
org.eclipse.jdt.core.compiler.problem.unusedImport=error
-org.eclipse.jdt.core.compiler.problem.unusedLocal=error
org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
org.eclipse.jdt.core.compiler.source=11
diff --git a/gradle/validation/ecj-lint/ecj.javadocs.prefs b/gradle/validation/ecj-lint/ecj.javadocs.prefs
index b85d32a..9757070 100644
--- a/gradle/validation/ecj-lint/ecj.javadocs.prefs
+++ b/gradle/validation/ecj-lint/ecj.javadocs.prefs
@@ -1,24 +1,13 @@
+#Sun Sep 23 20:55:03 EDT 2012
eclipse.preferences.version=1
-org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=disabled
-org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
org.eclipse.jdt.core.compiler.annotation.nonnull=org.eclipse.jdt.annotation.NonNull
-org.eclipse.jdt.core.compiler.annotation.nonnull.secondary=
org.eclipse.jdt.core.compiler.annotation.nonnullbydefault=org.eclipse.jdt.annotation.NonNullByDefault
-org.eclipse.jdt.core.compiler.annotation.nonnullbydefault.secondary=
+org.eclipse.jdt.core.compiler.annotation.nonnullisdefault=disabled
org.eclipse.jdt.core.compiler.annotation.nullable=org.eclipse.jdt.annotation.Nullable
-org.eclipse.jdt.core.compiler.annotation.nullable.secondary=
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
-org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
-org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=11
-org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=11
-org.eclipse.jdt.core.compiler.debug.lineNumber=generate
-org.eclipse.jdt.core.compiler.debug.localVariable=generate
-org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.doc.comment.support=enabled
-org.eclipse.jdt.core.compiler.problem.APILeak=error
-org.eclipse.jdt.core.compiler.problem.annotatedTypeArgumentToUnannotated=error
org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.autoboxing=ignore
@@ -29,9 +18,7 @@ org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled
org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled
org.eclipse.jdt.core.compiler.problem.discouragedReference=error
org.eclipse.jdt.core.compiler.problem.emptyStatement=ignore
-org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
-org.eclipse.jdt.core.compiler.problem.explicitlyClosedAutoCloseable=ignore
org.eclipse.jdt.core.compiler.problem.fallthroughCase=ignore
org.eclipse.jdt.core.compiler.problem.fatalOptionalError=disabled
org.eclipse.jdt.core.compiler.problem.fieldHiding=ignore
@@ -50,10 +37,8 @@ org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=disabled
org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private
org.eclipse.jdt.core.compiler.problem.localVariableHiding=ignore
org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error
-org.eclipse.jdt.core.compiler.problem.missingDefaultCase=ignore
-org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=error
-org.eclipse.jdt.core.compiler.problem.missingEnumCaseDespiteDefault=disabled
-org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=error
+org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore
+org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=ignore
org.eclipse.jdt.core.compiler.problem.missingJavadocComments=ignore
org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled
org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=public
@@ -69,63 +54,43 @@ org.eclipse.jdt.core.compiler.problem.missingSynchronizedOnInheritedMethod=ignor
org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore
-org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=error
-org.eclipse.jdt.core.compiler.problem.nonnullTypeVariableFromLegacyInvocation=error
-org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=error
org.eclipse.jdt.core.compiler.problem.nullReference=ignore
-org.eclipse.jdt.core.compiler.problem.nullSpecViolation=error
-org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=error
org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error
org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore
-org.eclipse.jdt.core.compiler.problem.pessimisticNullAnalysisForFreeTypeVariables=error
org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=ignore
org.eclipse.jdt.core.compiler.problem.potentialNullReference=ignore
-org.eclipse.jdt.core.compiler.problem.potentiallyUnclosedCloseable=ignore
org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
-org.eclipse.jdt.core.compiler.problem.redundantNullAnnotation=error
org.eclipse.jdt.core.compiler.problem.redundantNullCheck=ignore
org.eclipse.jdt.core.compiler.problem.redundantSpecificationOfTypeArguments=ignore
-org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=error
+org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=ignore
org.eclipse.jdt.core.compiler.problem.reportMethodCanBePotentiallyStatic=ignore
org.eclipse.jdt.core.compiler.problem.reportMethodCanBeStatic=ignore
-org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=error
org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled
org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=ignore
-org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=enabled
+org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=disabled
org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled
-org.eclipse.jdt.core.compiler.problem.suppressWarningsNotFullyAnalysed=error
-org.eclipse.jdt.core.compiler.problem.syntacticNullAnalysisForFields=disabled
org.eclipse.jdt.core.compiler.problem.syntheticAccessEmulation=ignore
-org.eclipse.jdt.core.compiler.problem.terminalDeprecation=ignore
org.eclipse.jdt.core.compiler.problem.typeParameterHiding=ignore
org.eclipse.jdt.core.compiler.problem.unavoidableGenericTypeProblems=enabled
org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=ignore
-org.eclipse.jdt.core.compiler.problem.unclosedCloseable=ignore
org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=ignore
org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=ignore
-org.eclipse.jdt.core.compiler.problem.unlikelyCollectionMethodArgumentType=error
-org.eclipse.jdt.core.compiler.problem.unlikelyCollectionMethodArgumentTypeStrict=disabled
-org.eclipse.jdt.core.compiler.problem.unlikelyEqualsArgumentType=error
org.eclipse.jdt.core.compiler.problem.unnecessaryElse=ignore
org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=ignore
org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore
-org.eclipse.jdt.core.compiler.problem.unstableAutoModuleName=ignore
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=ignore
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionExemptExceptionAndThrowable=enabled
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionIncludeDocCommentReference=enabled
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
-org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedImport=error
-org.eclipse.jdt.core.compiler.problem.unusedLabel=error
-org.eclipse.jdt.core.compiler.problem.unusedLocal=error
+org.eclipse.jdt.core.compiler.problem.unusedLabel=ignore
+org.eclipse.jdt.core.compiler.problem.unusedLocal=ignore
org.eclipse.jdt.core.compiler.problem.unusedObjectAllocation=ignore
org.eclipse.jdt.core.compiler.problem.unusedParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedParameterIncludeDocCommentReference=enabled
org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
-org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
-org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=ignore
+org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=ignore
org.eclipse.jdt.core.compiler.problem.unusedWarningToken=ignore
org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
-org.eclipse.jdt.core.compiler.release=disabled
org.eclipse.jdt.core.compiler.source=11
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
index 699de29..ebb92cd 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
@@ -32,7 +32,7 @@ import org.apache.lucene.analysis.util.OpenStringBuilder;
/**
* A CharFilter that wraps another Reader and attempts to strip out HTML constructs.
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
public final class HTMLStripCharFilter extends BaseCharFilter {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
index 1540df6..8b83de0 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.util.OpenStringBuilder;
/**
* A CharFilter that wraps another Reader and attempts to strip out HTML constructs.
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
%%
%unicode 9.0
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java
index 55672d5..9f526a9 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.java
@@ -22,7 +22,7 @@ package org.apache.lucene.analysis.classic;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/** This class implements the classic lucene StandardTokenizer up until 3.0 */
-@SuppressWarnings({"unused", "fallthrough"})
+@SuppressWarnings("fallthrough")
class ClassicTokenizerImpl {
/** This character denotes the end of file */
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex
index c170962..798d9a5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/classic/ClassicTokenizerImpl.jflex
@@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* This class implements the classic lucene StandardTokenizer up until 3.0
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
%%
%class ClassicTokenizerImpl
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
index 886f3ff..33a762b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
@@ -22,6 +22,7 @@ import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
+import org.xml.sax.SAXParseException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
@@ -340,4 +341,24 @@ public class PatternParser extends DefaultHandler {
word = readToken(chars);
}
}
+
+ /** Returns a string of the location. */
+ private String getLocationString(SAXParseException ex) {
+ StringBuilder str = new StringBuilder();
+
+ String systemId = ex.getSystemId();
+ if (systemId != null) {
+ int index = systemId.lastIndexOf('/');
+ if (index != -1) {
+ systemId = systemId.substring(index + 1);
+ }
+ str.append(systemId);
+ }
+ str.append(':');
+ str.append(ex.getLineNumber());
+ str.append(':');
+ str.append(ex.getColumnNumber());
+
+ return str.toString();
+ } // getLocationString(SAXParseException):String
}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java
index 20d9bc4..fa5b788 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.java
@@ -42,7 +42,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li><EMOJI>: A sequence of Emoji characters</li>
* </ul>
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
public final class UAX29URLEmailTokenizerImpl {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex
index ec7ebc5..6f2028f 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/email/UAX29URLEmailTokenizerImpl.jflex
@@ -40,7 +40,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li><EMOJI>: A sequence of Emoji characters</li>
* </ul>
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
%%
%unicode 9.0
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
index 204d7e5..e60bed8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
@@ -619,6 +619,10 @@ public class KStemmer {
* CharArrayMap<String>(maxCacheSize,false); }
***/
+ private char finalChar() {
+ return word.charAt(k);
+ }
+
private char penultChar() {
return word.charAt(k - 1);
}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
index 63ae982..2fa2ec8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
@@ -59,6 +59,8 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
private static final String PARAM_DICTIONARY = "dictionary";
private static final String PARAM_AFFIX = "affix";
+ // NOTE: this one is currently unused?:
+ private static final String PARAM_RECURSION_CAP = "recursionCap";
private static final String PARAM_IGNORE_CASE = "ignoreCase";
private static final String PARAM_LONGEST_ONLY = "longestOnly";
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ConcatenateGraphFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ConcatenateGraphFilter.java
index 01517b1..bfdfb43 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ConcatenateGraphFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ConcatenateGraphFilter.java
@@ -348,7 +348,7 @@ public final class ConcatenateGraphFilter extends TokenStream {
* @lucene.internal
*/
public static final class BytesRefBuilderTermAttributeImpl extends AttributeImpl
- implements BytesRefBuilderTermAttribute {
+ implements BytesRefBuilderTermAttribute, TermToBytesRefAttribute {
private final BytesRefBuilder bytes = new BytesRefBuilder();
private transient CharsRefBuilder charsRef;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FixBrokenOffsetsFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FixBrokenOffsetsFilter.java
index 426d9d8..1b758f5 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FixBrokenOffsetsFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/FixBrokenOffsetsFilter.java
@@ -31,6 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
public final class FixBrokenOffsetsFilter extends TokenFilter {
private int lastStartOffset;
+ private int lastEndOffset;
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@@ -57,6 +58,7 @@ public final class FixBrokenOffsetsFilter extends TokenFilter {
public void reset() throws IOException {
super.reset();
lastStartOffset = 0;
+ lastEndOffset = 0;
}
private void fixOffsets() {
@@ -70,5 +72,6 @@ public final class FixBrokenOffsetsFilter extends TokenFilter {
}
offsetAtt.setOffset(startOffset, endOffset);
lastStartOffset = startOffset;
+ lastEndOffset = endOffset;
}
}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java
index b0d7f9b..71f4e1a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ProtectedTermFilterFactory.java
@@ -31,6 +31,7 @@ import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenFilterFactory;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.ResourceLoader;
+import org.apache.lucene.util.ResourceLoaderAware;
/**
* Factory for a {@link ProtectedTermFilter}
@@ -81,7 +82,8 @@ import org.apache.lucene.util.ResourceLoader;
* @since 7.4.0
* @lucene.spi {@value #NAME}
*/
-public class ProtectedTermFilterFactory extends ConditionalTokenFilterFactory {
+public class ProtectedTermFilterFactory extends ConditionalTokenFilterFactory
+ implements ResourceLoaderAware {
public static final String NAME = "protectedTerm";
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java
index 5cf3319..b90e137 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/FloatEncoder.java
@@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;
*
* @see org.apache.lucene.analysis.payloads.PayloadHelper#encodeFloat(float, byte[], int)
*/
-public class FloatEncoder extends AbstractEncoder {
+public class FloatEncoder extends AbstractEncoder implements PayloadEncoder {
@Override
public BytesRef encode(char[] buffer, int offset, int length) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IdentityEncoder.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IdentityEncoder.java
index 96431dd..22dd4a1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IdentityEncoder.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IdentityEncoder.java
@@ -23,7 +23,7 @@ import java.nio.charset.StandardCharsets;
import org.apache.lucene.util.BytesRef;
/** Does nothing other than convert the char array to a byte array using the specified encoding. */
-public class IdentityEncoder extends AbstractEncoder {
+public class IdentityEncoder extends AbstractEncoder implements PayloadEncoder {
protected Charset charset = StandardCharsets.UTF_8;
public IdentityEncoder() {}
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java
index 476718f..01ac026 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/IntegerEncoder.java
@@ -24,7 +24,7 @@ import org.apache.lucene.util.BytesRef;
*
* <p>See {@link org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)}.
*/
-public class IntegerEncoder extends AbstractEncoder {
+public class IntegerEncoder extends AbstractEncoder implements PayloadEncoder {
@Override
public BytesRef encode(char[] buffer, int offset, int length) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
index 3e97b54..93886bf 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
@@ -22,7 +22,7 @@ package org.apache.lucene.analysis.wikipedia;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/** JFlex-generated tokenizer that is aware of Wikipedia syntax. */
-@SuppressWarnings({"unused", "fallthrough"})
+@SuppressWarnings("fallthrough")
class WikipediaTokenizerImpl {
/** This character denotes the end of file */
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
index 4b4c677..3ac31e4 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
@@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* JFlex-generated tokenizer that is aware of Wikipedia syntax.
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
%%
%class WikipediaTokenizerImpl
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
index d91baa6..76aa70b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilter.java
@@ -358,8 +358,9 @@ public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {
static void assertLegalOffsets(String in) throws Exception {
int length = in.length();
HTMLStripCharFilter reader = new HTMLStripCharFilter(new BufferedReader(new StringReader(in)));
+ int ch = 0;
int off = 0;
- while (reader.read() != -1) {
+ while ((ch = reader.read()) != -1) {
int correction = reader.correctOffset(off);
assertTrue(
"invalid offset correction: " + off + "->" + correction + " for doc of length: " + length,
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java
index 2bf8bed..b7bbb24 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java
@@ -56,10 +56,11 @@ public class TestCJKBigramFilterFactory extends BaseTokenStreamFactoryTestCase {
/** Test that bogus arguments result in exception */
public void testBogusArguments() throws Exception {
- expectThrows(
- IllegalArgumentException.class,
- () -> {
- tokenFilterFactory("CJKBigram", "bogusArg", "bogusValue");
- });
+ IllegalArgumentException expected =
+ expectThrows(
+ IllegalArgumentException.class,
+ () -> {
+ tokenFilterFactory("CJKBigram", "bogusArg", "bogusValue");
+ });
}
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index 9f8ab64..013c254 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -673,4 +673,16 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
null,
false);
}
+
+ private Analyzer getAnalyzer(final int flags) {
+ return new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+ return new TokenStreamComponents(
+ tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
+ }
+ };
+ }
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
index f0fe89c..670faf6 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
@@ -800,6 +800,16 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
return (flags & flag) != 0;
}
+ private static boolean isEnglishPossessive(String text, int pos) {
+ if (pos > 2) {
+ if ((text.charAt(pos - 1) == 's' || text.charAt(pos - 1) == 'S')
+ && (pos == text.length() || text.charAt(pos) != '-')) {
+ text = text.substring(0, text.length() - 2);
+ }
+ }
+ return true;
+ }
+
private static class WordPart {
final String part;
final int startOffset;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
index 5908cdf..f0b1066 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramTokenizer.java
@@ -44,7 +44,7 @@ public class TestNGramTokenizer extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
- new NGramTokenizer(2, 1);
+ NGramTokenizer tok = new NGramTokenizer(2, 1);
});
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestSimplePatternSplitTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestSimplePatternSplitTokenizer.java
index 82fc2ec..2cafcfc 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestSimplePatternSplitTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestSimplePatternSplitTokenizer.java
@@ -70,7 +70,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testNoTokens() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer(".*");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
String s;
while (true) {
s = TestUtil.randomUnicodeString(random());
@@ -95,7 +95,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testSplitSingleCharWhitespace() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a \tb c"));
assertTokenStreamContents(
t, new String[] {"a", "b", "c"}, new int[] {0, 3, 7}, new int[] {1, 4, 8});
@@ -103,7 +103,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testSplitMultiCharWhitespace() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a \tb c"));
assertTokenStreamContents(
t, new String[] {"a", "b", "c"}, new int[] {0, 3, 7}, new int[] {1, 4, 8});
@@ -111,21 +111,21 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testLeadingNonToken() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader(" a c"));
assertTokenStreamContents(t, new String[] {"a", "c"}, new int[] {4, 6}, new int[] {5, 7});
}
public void testTrailingNonToken() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a c "));
assertTokenStreamContents(t, new String[] {"a", "c"}, new int[] {0, 2}, new int[] {1, 3});
}
public void testEmptyStringPatternOneMatch() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("a*");
- t.getAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("bbab"));
assertTokenStreamContents(t, new String[] {"bb", "b"}, new int[] {0, 3}, new int[] {2, 4});
}
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
index 83e7f51..abd3b89 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
@@ -62,6 +62,14 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
b.add(inputCharsRef.get(), outputCharsRef.get(), keepOrig);
}
+ private void assertEquals(CharTermAttribute term, String expected) {
+ assertEquals(expected.length(), term.length());
+ final char[] buffer = term.buffer();
+ for (int chIDX = 0; chIDX < expected.length(); chIDX++) {
+ assertEquals(expected.charAt(chIDX), buffer[chIDX]);
+ }
+ }
+
// For the output string: separate positions with a space,
// and separate multiple tokens at each position with a
// /. If a token should have end offset != the input
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
index ae2491f..b654880 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java
@@ -156,7 +156,8 @@ public class TestWikipediaTokenizerFactory extends BaseTokenStreamFactoryTestCas
expectThrows(
IllegalArgumentException.class,
() -> {
- tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, "-1").create(newAttributeFactory());
+ Tokenizer tf =
+ tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, "-1").create(newAttributeFactory());
});
assertTrue(
expected
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java
index 263656f..d2a389b 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/tokenattributes/ScriptAttributeImpl.java
@@ -25,7 +25,7 @@ import org.apache.lucene.util.AttributeReflector;
*
* @lucene.experimental
*/
-public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute {
+public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable {
private int code = UScript.COMMON;
/** Initializes this attribute with <code>UScript.COMMON</code> */
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/BaseFormAttributeImpl.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/BaseFormAttributeImpl.java
index 845c318..e1a6e43 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/BaseFormAttributeImpl.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/BaseFormAttributeImpl.java
@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Attribute for {@link Token#getBaseForm()}. */
-public class BaseFormAttributeImpl extends AttributeImpl implements BaseFormAttribute {
+public class BaseFormAttributeImpl extends AttributeImpl implements BaseFormAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/InflectionAttributeImpl.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/InflectionAttributeImpl.java
index 36a04dc..21df3e9 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/InflectionAttributeImpl.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/InflectionAttributeImpl.java
@@ -22,7 +22,8 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Attribute for Kuromoji inflection data. */
-public class InflectionAttributeImpl extends AttributeImpl implements InflectionAttribute {
+public class InflectionAttributeImpl extends AttributeImpl
+ implements InflectionAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/PartOfSpeechAttributeImpl.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/PartOfSpeechAttributeImpl.java
index 464adbd..c2f54ff 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/PartOfSpeechAttributeImpl.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/PartOfSpeechAttributeImpl.java
@@ -22,7 +22,8 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Attribute for {@link Token#getPartOfSpeech()}. */
-public class PartOfSpeechAttributeImpl extends AttributeImpl implements PartOfSpeechAttribute {
+public class PartOfSpeechAttributeImpl extends AttributeImpl
+ implements PartOfSpeechAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/ReadingAttributeImpl.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/ReadingAttributeImpl.java
index bada242..c71aa64 100644
--- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/ReadingAttributeImpl.java
+++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/tokenattributes/ReadingAttributeImpl.java
@@ -22,7 +22,7 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Attribute for Kuromoji reading data */
-public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute {
+public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java
index a749045..0c3ad9c 100644
--- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java
+++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java
@@ -27,7 +27,7 @@ import org.apache.lucene.util.AttributeReflector;
* @see MorphosyntacticTagsAttribute
*/
public class MorphosyntacticTagsAttributeImpl extends AttributeImpl
- implements MorphosyntacticTagsAttribute {
+ implements MorphosyntacticTagsAttribute, Cloneable {
/** Initializes this attribute with no tags */
public MorphosyntacticTagsAttributeImpl() {}
diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/PartOfSpeechAttributeImpl.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/PartOfSpeechAttributeImpl.java
index a1b04cb..a46b888 100644
--- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/PartOfSpeechAttributeImpl.java
+++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/PartOfSpeechAttributeImpl.java
@@ -28,7 +28,8 @@ import org.apache.lucene.util.AttributeReflector;
*
* @lucene.experimental
*/
-public class PartOfSpeechAttributeImpl extends AttributeImpl implements PartOfSpeechAttribute {
+public class PartOfSpeechAttributeImpl extends AttributeImpl
+ implements PartOfSpeechAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/ReadingAttributeImpl.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/ReadingAttributeImpl.java
index e3a6c0d..b6467a0 100644
--- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/ReadingAttributeImpl.java
+++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/tokenattributes/ReadingAttributeImpl.java
@@ -25,7 +25,7 @@ import org.apache.lucene.util.AttributeReflector;
*
* @lucene.experimental
*/
-public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute {
+public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute, Cloneable {
private Token token;
@Override
diff --git a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizer.java b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizer.java
index 134fa25..d465766 100644
--- a/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizer.java
+++ b/lucene/analysis/opennlp/src/java/org/apache/lucene/analysis/opennlp/OpenNLPTokenizer.java
@@ -43,6 +43,7 @@ public final class OpenNLPTokenizer extends SegmentingTokenizerBase {
private int termNum = 0;
private int sentenceStart = 0;
+ private NLPSentenceDetectorOp sentenceOp = null;
private NLPTokenizerOp tokenizerOp = null;
public OpenNLPTokenizer(
@@ -53,6 +54,7 @@ public final class OpenNLPTokenizer extends SegmentingTokenizerBase {
throw new IllegalArgumentException(
"OpenNLPTokenizer: both a Sentence Detector and a Tokenizer are required");
}
+ this.sentenceOp = sentenceOp;
this.tokenizerOp = tokenizerOp;
}
diff --git a/lucene/analysis/opennlp/src/test/org/apache/lucene/analysis/opennlp/TestOpenNLPTokenizerFactory.java b/lucene/analysis/opennlp/src/test/org/apache/lucene/analysis/opennlp/TestOpenNLPTokenizerFactory.java
index 315af01..4573ce7 100644
--- a/lucene/analysis/opennlp/src/test/org/apache/lucene/analysis/opennlp/TestOpenNLPTokenizerFactory.java
+++ b/lucene/analysis/opennlp/src/test/org/apache/lucene/analysis/opennlp/TestOpenNLPTokenizerFactory.java
@@ -82,9 +82,10 @@ public class TestOpenNLPTokenizerFactory extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
- CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
- .withTokenizer("opennlp", "tokenizerModel", "en-test-tokenizer.bin")
- .build();
+ CustomAnalyzer analyzer =
+ CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
+ .withTokenizer("opennlp", "tokenizerModel", "en-test-tokenizer.bin")
+ .build();
});
assertTrue(
expected.getMessage().contains("Configuration Error: missing parameter 'sentenceModel'"));
@@ -96,9 +97,10 @@ public class TestOpenNLPTokenizerFactory extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
- CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
- .withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin")
- .build();
+ CustomAnalyzer analyzer =
+ CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
+ .withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin")
+ .build();
});
assertTrue(
expected.getMessage().contains("Configuration Error: missing parameter 'tokenizerModel'"));
diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java
index e1f267a..50ce615 100644
--- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java
+++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java
@@ -27,6 +27,8 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
/** Filter for DoubleMetaphone (supporting secondary codes) */
public final class DoubleMetaphoneFilter extends TokenFilter {
+ private static final String TOKEN_TYPE = "DoubleMetaphone";
+
private final LinkedList<State> remainingTokens = new LinkedList<>();
private final DoubleMetaphone encoder = new DoubleMetaphone();
private final boolean inject;
diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java
index c0014b4..766ea5e 100644
--- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java
+++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BigramDictionary.java
@@ -53,6 +53,8 @@ class BigramDictionary extends AbstractDictionary {
private int max = 0;
+ private int repeat = 0;
+
// static Logger log = Logger.getLogger(BigramDictionary.class);
public static synchronized BigramDictionary getInstance() {
@@ -141,7 +143,7 @@ class BigramDictionary extends AbstractDictionary {
*/
public void loadFromFile(String dctFilePath) throws IOException {
- int i, cnt, length;
+ int i, cnt, length, total = 0;
// The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
// The 3756th is used (as a header) to store information.
int[] buffer = new int[3];
@@ -161,6 +163,7 @@ class BigramDictionary extends AbstractDictionary {
if (cnt <= 0) {
continue;
}
+ total += cnt;
int j = 0;
while (j < cnt) {
dctFile.read(intBuffer);
@@ -229,11 +232,13 @@ class BigramDictionary extends AbstractDictionary {
if (hash2 < 0) hash2 = PRIME_BIGRAM_LENGTH + hash2;
int index = hash1;
int i = 1;
+ repeat++;
while (bigramHashTable[index] != 0
&& bigramHashTable[index] != hashId
&& i < PRIME_BIGRAM_LENGTH) {
index = (hash1 + i * hash2) % PRIME_BIGRAM_LENGTH;
i++;
+ repeat++;
if (i > max) max = i;
}
// System.out.println(i - 1);
diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java
index f09656e..471cb98 100644
--- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java
+++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java
@@ -228,6 +228,7 @@ public class Trie {
int cmd = -1;
StrEnum e = new StrEnum(key, forward);
Character ch = null;
+ Character aux = null;
for (int i = 0; i < key.length(); ) {
ch = e.next();
@@ -242,7 +243,7 @@ public class Trie {
for (int skip = c.skip; skip > 0; skip--) {
if (i < key.length()) {
- e.next();
+ aux = e.next();
} else {
return null;
}
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingTermVectorsReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingTermVectorsReader.java
index c2f975d..6d8a248 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingTermVectorsReader.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene50/compressing/Lucene50CompressingTermVectorsReader.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.backward_codecs.lucene50.compressing;
+import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
@@ -57,7 +58,8 @@ import org.apache.lucene.util.packed.PackedInts;
*
* @lucene.experimental
*/
-public final class Lucene50CompressingTermVectorsReader extends TermVectorsReader {
+public final class Lucene50CompressingTermVectorsReader extends TermVectorsReader
+ implements Closeable {
// hard limit on the maximum number of documents per chunk
static final int MAX_DOCUMENTS_PER_CHUNK = 128;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsReader.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsReader.java
index f755057..75ca01c 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsReader.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsReader.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.backward_codecs.lucene60;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -31,7 +32,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.bkd.BKDReader;
/** Reads point values previously written with Lucene60PointsWriter */
-public class Lucene60PointsReader extends PointsReader {
+public class Lucene60PointsReader extends PointsReader implements Closeable {
final IndexInput dataIn;
final SegmentReadState readState;
final Map<Integer, BKDReader> readers = new HashMap<>();
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesConsumer.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesConsumer.java
index d1173b5..a8c1bad 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesConsumer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesConsumer.java
@@ -20,6 +20,7 @@ import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat
import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat.NUMERIC_BLOCK_SHIFT;
import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat.NUMERIC_BLOCK_SIZE;
+import java.io.Closeable; // javadocs
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
@@ -53,7 +54,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
import org.apache.lucene.util.packed.DirectWriter;
/** writer for {@link Lucene70DocValuesFormat} */
-final class Lucene70DocValuesConsumer extends DocValuesConsumer {
+final class Lucene70DocValuesConsumer extends DocValuesConsumer implements Closeable {
IndexOutput data, meta;
final int maxDoc;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesProducer.java
index a52989d..2e2f20d 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesProducer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70DocValuesProducer.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.backward_codecs.lucene70;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -48,7 +49,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
import org.apache.lucene.util.packed.DirectReader;
/** reader for {@link Lucene70DocValuesFormat} */
-final class Lucene70DocValuesProducer extends DocValuesProducer {
+final class Lucene70DocValuesProducer extends DocValuesProducer implements Closeable {
private final Map<String, NumericEntry> numerics = new HashMap<>();
private final Map<String, BinaryEntry> binaries = new HashMap<>();
private final Map<String, SortedEntry> sorted = new HashMap<>();
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70NormsProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70NormsProducer.java
index ca4c015..1446199 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70NormsProducer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70NormsProducer.java
@@ -327,6 +327,20 @@ final class Lucene70NormsProducer extends NormsProducer implements Cloneable {
};
}
+ private IndexInput getDisiInput2(FieldInfo field, NormsEntry entry) throws IOException {
+ IndexInput slice = null;
+ if (merging) {
+ slice = disiInputs.get(field.number);
+ }
+ if (slice == null) {
+ slice = data.slice("docs", entry.docsWithFieldOffset, entry.docsWithFieldLength);
+ if (merging) {
+ disiInputs.put(field.number, slice);
+ }
+ }
+ return slice;
+ }
+
@Override
public NumericDocValues getNorms(FieldInfo field) throws IOException {
final NormsEntry entry = norms.get(field.number);
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70SegmentInfoFormat.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70SegmentInfoFormat.java
index 2d96dba..d67992d 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70SegmentInfoFormat.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene70/Lucene70SegmentInfoFormat.java
@@ -106,14 +106,17 @@ public class Lucene70SegmentInfoFormat extends SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
- CodecUtil.checkIndexHeader(
- input,
- Lucene70SegmentInfoFormat.CODEC_NAME,
- Lucene70SegmentInfoFormat.VERSION_START,
- Lucene70SegmentInfoFormat.VERSION_CURRENT,
- segmentID,
- "");
+ int format =
+ CodecUtil.checkIndexHeader(
+ input,
+ Lucene70SegmentInfoFormat.CODEC_NAME,
+ Lucene70SegmentInfoFormat.VERSION_START,
+ Lucene70SegmentInfoFormat.VERSION_CURRENT,
+ segmentID,
+ "");
+
si = parseSegmentInfo(dir, input, segment, segmentID);
+
} catch (Throwable exception) {
priorE = exception;
} finally {
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesConsumer.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesConsumer.java
index 12d97c0..bfb0335 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesConsumer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesConsumer.java
@@ -62,7 +62,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
import org.apache.lucene.util.packed.DirectWriter;
/** writer for {@link Lucene80DocValuesFormat} */
-final class Lucene80DocValuesConsumer extends DocValuesConsumer {
+final class Lucene80DocValuesConsumer extends DocValuesConsumer implements Closeable {
final Lucene80DocValuesFormat.Mode mode;
IndexOutput data, meta;
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesProducer.java b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesProducer.java
index 3bff879..67e69ee 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesProducer.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/backward_codecs/lucene80/Lucene80DocValuesProducer.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.backward_codecs.lucene80;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -51,7 +52,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
import org.apache.lucene.util.packed.DirectReader;
/** reader for {@link Lucene80DocValuesFormat} */
-final class Lucene80DocValuesProducer extends DocValuesProducer {
+final class Lucene80DocValuesProducer extends DocValuesProducer implements Closeable {
private final Map<String, NumericEntry> numerics = new HashMap<>();
private final Map<String, BinaryEntry> binaries = new HashMap<>();
private final Map<String, SortedEntry> sorted = new HashMap<>();
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsWriter.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsWriter.java
index 4dbfd38..ea07ce5 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsWriter.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene60/Lucene60PointsWriter.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.backward_codecs.lucene60;
+import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
@@ -40,7 +41,7 @@ import org.apache.lucene.util.bkd.BKDReader;
import org.apache.lucene.util.bkd.BKDWriter;
/** Writes dimensional values */
-public class Lucene60PointsWriter extends PointsWriter {
+public class Lucene60PointsWriter extends PointsWriter implements Closeable {
/** Output used to write the BKD tree data file */
protected final IndexOutput dataOut;
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/TestIndexedDISI.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/TestIndexedDISI.java
index de8b8f0..bf54bf3 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/TestIndexedDISI.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene80/TestIndexedDISI.java
@@ -97,6 +97,7 @@ public class TestIndexedDISI extends LuceneTestCase {
private void assertAdvanceBeyondEnd(BitSet set, Directory dir) throws IOException {
final int cardinality = set.cardinality();
final byte denseRankPower = 9; // Not tested here so fixed to isolate factors
+ long length;
int jumpTableentryCount;
try (IndexOutput out = dir.createOutput("bar", IOContext.DEFAULT)) {
jumpTableentryCount =
@@ -433,7 +434,9 @@ public class TestIndexedDISI extends LuceneTestCase {
length = out.getFilePointer();
}
try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) {
- new IndexedDISI(in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
+ IndexedDISI disi =
+ new IndexedDISI(
+ in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
}
// This tests the legality of the denseRankPower only, so we don't do anything with the disi
}
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
index fefbc7b..c944af8 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
@@ -36,7 +36,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
* A QueryMaker that uses common and uncommon actual Wikipedia queries for searching the English
* Wikipedia collection. 90 queries total.
*/
-public class EnwikiQueryMaker extends AbstractQueryMaker {
+public class EnwikiQueryMaker extends AbstractQueryMaker implements QueryMaker {
// common and a few uncommon queries from wikipedia search logs
private static String[] STANDARD_QUERIES = {
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
index 283178e..2eb257a 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
@@ -43,7 +43,7 @@ import org.apache.lucene.util.IOUtils;
* <pre>file.query.maker.file=c:/myqueries.txt
* file.query.maker.default.field=body</pre>
*/
-public class FileBasedQueryMaker extends AbstractQueryMaker {
+public class FileBasedQueryMaker extends AbstractQueryMaker implements QueryMaker {
@Override
protected Query[] prepareQueries() throws Exception {
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
index 1f3216d..795e3a6 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
@@ -34,7 +34,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
* A QueryMaker that makes queries devised manually (by Grant Ingersoll) for searching in the
* Reuters collection.
*/
-public class ReutersQueryMaker extends AbstractQueryMaker {
+public class ReutersQueryMaker extends AbstractQueryMaker implements QueryMaker {
private static String[] STANDARD_QUERIES = {
// Start with some short queries
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
index ceaabcb..c7036f5 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
@@ -30,7 +30,7 @@ import org.apache.lucene.search.TermQuery;
* A QueryMaker that makes queries for a collection created using {@link
* org.apache.lucene.benchmark.byTask.feeds.SingleDocSource}.
*/
-public class SimpleQueryMaker extends AbstractQueryMaker {
+public class SimpleQueryMaker extends AbstractQueryMaker implements QueryMaker {
/**
* Prepare the queries for this test. Extending classes can override this method for preparing
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
index ec35782..b7b693e 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
@@ -97,6 +97,12 @@ public class ReadTokensTask extends PerfTask {
int left;
String s;
+ void init(String s) {
+ this.s = s;
+ left = s.length();
+ this.upto = 0;
+ }
+
@Override
public int read(char[] c) {
return read(c, 0, c.length);
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
index a8a9232..54797f5 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
@@ -178,7 +178,6 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception;
}
- @SuppressWarnings("unused")
private volatile int preventOptimizeAway = 0;
private class StandardHLImpl implements HLImpl {
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/Test20NewsgroupsClassification.java b/lucene/classification/src/test/org/apache/lucene/classification/Test20NewsgroupsClassification.java
index 778b014..0625023 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/Test20NewsgroupsClassification.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/Test20NewsgroupsClassification.java
@@ -437,7 +437,7 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
}
}
}
- return new NewsPost(body.toString(), subject, groupName);
+ return new NewsPost(body.toString(), subject, groupName, number);
} catch (Throwable e) {
return null;
}
@@ -447,11 +447,13 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
private final String body;
private final String subject;
private final String group;
+ private final String number;
- private NewsPost(String body, String subject, String group) {
+ private NewsPost(String body, String subject, String group, String number) {
this.body = body;
this.subject = subject;
this.group = group;
+ this.number = number;
}
public String getBody() {
@@ -465,5 +467,9 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
public String getGroup() {
return group;
}
+
+ public String getNumber() {
+ return number;
+ }
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
index 3784f41..fff2c56 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.codecs.blockterms;
+import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -51,7 +52,7 @@ import org.apache.lucene.util.IOUtils;
*
* @lucene.experimental
*/
-public class BlockTermsWriter extends FieldsConsumer {
+public class BlockTermsWriter extends FieldsConsumer implements Closeable {
static final String CODEC_NAME = "BlockTermsWriter";
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index efea36d..a13522b 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -1920,6 +1920,14 @@ public final class DirectPostingsFormat extends PostingsFormat {
public HighFreqDocsEnum() {}
+ public int[] getDocIDs() {
+ return docIDs;
+ }
+
+ public int[] getFreqs() {
+ return freqs;
+ }
+
public PostingsEnum reset(int[] docIDs, int[] freqs) {
this.docIDs = docIDs;
this.freqs = freqs;
@@ -2098,6 +2106,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
posJump = hasOffsets ? 3 : 1;
}
+ public int[] getDocIDs() {
+ return docIDs;
+ }
+
+ public int[][] getPositions() {
+ return positions;
+ }
+
+ public int getPosJump() {
+ return posJump;
+ }
+
public PostingsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
this.docIDs = docIDs;
this.freqs = freqs;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index abe075d..abebcd9 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -559,7 +559,7 @@ public class FSTTermsReader extends FieldsProducer {
if (term == null) {
return SeekStatus.END;
} else {
- return term.get().equals(target) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
+ return term.equals(target) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
index 6c74567..c630e43 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDReader.java
@@ -22,6 +22,7 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_V
import java.io.IOException;
import java.nio.charset.StandardCharsets;
+import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
@@ -194,6 +195,59 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
}
}
+ private void visitCompressedDocValues(
+ int[] commonPrefixLengths,
+ byte[] scratchPackedValue,
+ IndexInput in,
+ int[] docIDs,
+ int count,
+ IntersectVisitor visitor,
+ int compressedDim)
+ throws IOException {
+ // the byte at `compressedByteOffset` is compressed using run-length compression,
+ // other suffix bytes are stored verbatim
+ final int compressedByteOffset =
+ compressedDim * bytesPerDim + commonPrefixLengths[compressedDim];
+ commonPrefixLengths[compressedDim]++;
+ int i;
+ for (i = 0; i < count; ) {
+ scratchPackedValue[compressedByteOffset] = in.readByte();
+ final int runLen = Byte.toUnsignedInt(in.readByte());
+ for (int j = 0; j < runLen; ++j) {
+ for (int dim = 0; dim < numDims; dim++) {
+ int prefix = commonPrefixLengths[dim];
+ in.readBytes(scratchPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
+ }
+ visitor.visit(docIDs[i + j], scratchPackedValue);
+ }
+ i += runLen;
+ }
+ if (i != count) {
+ throw new CorruptIndexException(
+ "Sub blocks do not add up to the expected count: " + count + " != " + i, in);
+ }
+ }
+
+ private int readCompressedDim(IndexInput in) throws IOException {
+ int compressedDim = in.readByte();
+ if (compressedDim < -1 || compressedDim >= numIndexDims) {
+ throw new CorruptIndexException("Got compressedDim=" + compressedDim, in);
+ }
+ return compressedDim;
+ }
+
+ private void readCommonPrefixes(
+ int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in) throws IOException {
+ for (int dim = 0; dim < numDims; dim++) {
+ int prefix = in.readVInt();
+ commonPrefixLengths[dim] = prefix;
+ if (prefix > 0) {
+ in.readBytes(scratchPackedValue, dim * bytesPerDim, prefix);
+ }
+ // System.out.println("R: " + dim + " of " + numDims + " prefix=" + prefix);
+ }
+ }
+
private void intersect(
IntersectState state, int nodeID, byte[] cellMinPacked, byte[] cellMaxPacked)
throws IOException {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
index d46bad2..211ef04 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextBKDWriter.java
@@ -816,6 +816,40 @@ final class SimpleTextBKDWriter implements Closeable {
}
}
+ private void writeLeafBlockPackedValuesRange(
+ IndexOutput out,
+ int[] commonPrefixLengths,
+ int start,
+ int end,
+ IntFunction<BytesRef> packedValues)
+ throws IOException {
+ for (int i = start; i < end; ++i) {
+ BytesRef ref = packedValues.apply(i);
+ assert ref.length == config.packedBytesLength;
+
+ for (int dim = 0; dim < config.numDims; dim++) {
+ int prefix = commonPrefixLengths[dim];
+ out.writeBytes(
+ ref.bytes, ref.offset + dim * config.bytesPerDim + prefix, config.bytesPerDim - prefix);
+ }
+ }
+ }
+
+ private static int runLen(
+ IntFunction<BytesRef> packedValues, int start, int end, int byteOffset) {
+ BytesRef first = packedValues.apply(start);
+ byte b = first.bytes[first.offset + byteOffset];
+ for (int i = start + 1; i < end; ++i) {
+ BytesRef ref = packedValues.apply(i);
+ byte b2 = ref.bytes[ref.offset + byteOffset];
+ assert Byte.toUnsignedInt(b2) >= Byte.toUnsignedInt(b);
+ if (b != b2) {
+ return i - start;
+ }
+ }
+ return end - start;
+ }
+
@Override
public void close() throws IOException {
if (tempInput != null) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
index 4dab17d..faa90c1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPointsWriter.java
@@ -157,6 +157,14 @@ class SimpleTextPointsWriter extends PointsWriter {
SimpleTextUtil.write(out, s, scratch);
}
+ private void writeInt(IndexOutput out, int x) throws IOException {
+ SimpleTextUtil.write(out, Integer.toString(x), scratch);
+ }
+
+ private void writeLong(IndexOutput out, long x) throws IOException {
+ SimpleTextUtil.write(out, Long.toString(x), scratch);
+ }
+
private void write(IndexOutput out, BytesRef b) throws IOException {
SimpleTextUtil.write(out, b);
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextVectorWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextVectorWriter.java
index 5494810..1699537 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextVectorWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextVectorWriter.java
@@ -74,8 +74,8 @@ public class SimpleTextVectorWriter extends VectorWriter {
public void writeField(FieldInfo fieldInfo, VectorValues vectors) throws IOException {
long vectorDataOffset = vectorData.getFilePointer();
List<Integer> docIds = new ArrayList<>();
- int docV;
- for (docV = vectors.nextDoc(); docV != NO_MORE_DOCS; docV = vectors.nextDoc()) {
+ int docV, ord = 0;
+ for (docV = vectors.nextDoc(); docV != NO_MORE_DOCS; docV = vectors.nextDoc(), ord++) {
writeVectorValue(vectors);
docIds.add(docV);
}
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
index 621c2f0..0629118 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
@@ -39,7 +39,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li><EMOJI>: A sequence of Emoji characters</li>
* </ul>
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
public final class StandardTokenizerImpl {
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
index 702866d..e95a9b4 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
+++ b/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
@@ -37,7 +37,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li><EMOJI>: A sequence of Emoji characters</li>
* </ul>
*/
-@SuppressWarnings({"unused","fallthrough"})
+@SuppressWarnings("fallthrough")
%%
%unicode 9.0
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/BytesTermAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/BytesTermAttributeImpl.java
index e115ee9..8783bbf 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/BytesTermAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/BytesTermAttributeImpl.java
@@ -26,7 +26,8 @@ import org.apache.lucene.util.BytesRef;
*
* @lucene.internal
*/
-public class BytesTermAttributeImpl extends AttributeImpl implements BytesTermAttribute {
+public class BytesTermAttributeImpl extends AttributeImpl
+ implements BytesTermAttribute, TermToBytesRefAttribute {
private BytesRef bytes;
/** Initialize this attribute with no bytes. */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
index c494bda..bae5c60 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
@@ -26,7 +26,7 @@ import org.apache.lucene.util.BytesRefBuilder;
/** Default implementation of {@link CharTermAttribute}. */
public class CharTermAttributeImpl extends AttributeImpl
- implements CharTermAttribute, TermToBytesRefAttribute {
+ implements CharTermAttribute, TermToBytesRefAttribute, Cloneable {
private static int MIN_BUFFER_SIZE = 10;
private char[] termBuffer = new char[ArrayUtil.oversize(MIN_BUFFER_SIZE, Character.BYTES)];
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java
index 9fe02aa..9a1bdc3 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java
@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link FlagsAttribute}. */
-public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute {
+public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable {
private int flags = 0;
/** Initialize this attribute with no bits set */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
index 8e05ece..8ddae00 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link OffsetAttribute}. */
-public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute {
+public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable {
private int startOffset;
private int endOffset;
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java
index b17f018..da36ebd 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java
@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.BytesRef;
/** Default implementation of {@link PayloadAttribute}. */
-public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute {
+public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable {
private BytesRef payload;
/** Initialize this attribute with no payload. */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
index 31ceca1..80ef0d4 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link PositionIncrementAttribute}. */
public class PositionIncrementAttributeImpl extends AttributeImpl
- implements PositionIncrementAttribute {
+ implements PositionIncrementAttribute, Cloneable {
private int positionIncrement = 1;
/** Initialize this attribute with position increment of 1 */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
index b7dd308..3a59faa 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionLengthAttributeImpl.java
@@ -20,7 +20,8 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link PositionLengthAttribute}. */
-public class PositionLengthAttributeImpl extends AttributeImpl implements PositionLengthAttribute {
+public class PositionLengthAttributeImpl extends AttributeImpl
+ implements PositionLengthAttribute, Cloneable {
private int positionLength = 1;
/** Initializes this attribute with position length of 1. */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermFrequencyAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermFrequencyAttributeImpl.java
index f578e28..4e8520f 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermFrequencyAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TermFrequencyAttributeImpl.java
@@ -20,7 +20,8 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link TermFrequencyAttribute}. */
-public class TermFrequencyAttributeImpl extends AttributeImpl implements TermFrequencyAttribute {
+public class TermFrequencyAttributeImpl extends AttributeImpl
+ implements TermFrequencyAttribute, Cloneable {
private int termFrequency = 1;
/** Initialize this attribute with term frequency of 1 */
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java
index 0f44404..dbe5e94 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java
@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
/** Default implementation of {@link TypeAttribute}. */
-public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute {
+public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable {
private String type;
/** Initialize this attribute with {@link TypeAttribute#DEFAULT_TYPE} */
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/VectorWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/VectorWriter.java
index b5c9681..fd3e90f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/VectorWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/VectorWriter.java
@@ -118,6 +118,7 @@ public abstract class VectorWriter implements Closeable {
/** Tracks state of one sub-reader that we are merging */
private static class VectorValuesSub extends DocIDMerger.Sub {
+ final MergeState.DocMap docMap;
final VectorValues values;
final int segmentIndex;
int count;
@@ -126,6 +127,7 @@ public abstract class VectorWriter implements Closeable {
super(docMap);
this.values = values;
this.segmentIndex = segmentIndex;
+ this.docMap = docMap;
assert values.docID() == -1;
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsReader.java
index 3b21a40..affe4fe 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsReader.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.codecs.lucene86;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -32,7 +33,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.bkd.BKDReader;
/** Reads point values previously written with {@link Lucene86PointsWriter} */
-public class Lucene86PointsReader extends PointsReader {
+public class Lucene86PointsReader extends PointsReader implements Closeable {
final IndexInput indexIn, dataIn;
final SegmentReadState readState;
final Map<Integer, BKDReader> readers = new HashMap<>();
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsWriter.java
index ba29bf8..0fcf77d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86PointsWriter.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.codecs.lucene86;
+import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -38,7 +39,7 @@ import org.apache.lucene.util.bkd.BKDReader;
import org.apache.lucene.util.bkd.BKDWriter;
/** Writes dimensional values */
-public class Lucene86PointsWriter extends PointsWriter {
+public class Lucene86PointsWriter extends PointsWriter implements Closeable {
/** Outputs used to write the BKD tree data files. */
protected final IndexOutput metaOut, indexOut, dataOut;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86SegmentInfoFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86SegmentInfoFormat.java
index 32a141b..0f55de1 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86SegmentInfoFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene86/Lucene86SegmentInfoFormat.java
@@ -103,9 +103,12 @@ public class Lucene86SegmentInfoFormat extends SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
- CodecUtil.checkIndexHeader(
- input, CODEC_NAME, VERSION_START, VERSION_CURRENT, segmentID, "");
+ int format =
+ CodecUtil.checkIndexHeader(
+ input, CODEC_NAME, VERSION_START, VERSION_CURRENT, segmentID, "");
+
si = parseSegmentInfo(dir, input, segment, segmentID);
+
} catch (Throwable exception) {
priorE = exception;
} finally {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumer.java
index 2e6389a..80a5868 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumer.java
@@ -62,7 +62,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
import org.apache.lucene.util.packed.DirectWriter;
/** writer for {@link Lucene90DocValuesFormat} */
-final class Lucene90DocValuesConsumer extends DocValuesConsumer {
+final class Lucene90DocValuesConsumer extends DocValuesConsumer implements Closeable {
final Lucene90DocValuesFormat.Mode mode;
IndexOutput data, meta;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducer.java
index eaa4a45..0a2bb13 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducer.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.codecs.lucene90;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -51,7 +52,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
import org.apache.lucene.util.packed.DirectReader;
/** reader for {@link Lucene90DocValuesFormat} */
-final class Lucene90DocValuesProducer extends DocValuesProducer {
+final class Lucene90DocValuesProducer extends DocValuesProducer implements Closeable {
private final Map<String, NumericEntry> numerics = new HashMap<>();
private final Map<String, BinaryEntry> binaries = new HashMap<>();
private final Map<String, SortedEntry> sorted = new HashMap<>();
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
index 1ee6102..43f4215 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90FieldInfosFormat.java
@@ -125,13 +125,14 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
Throwable priorE = null;
FieldInfo infos[] = null;
try {
- CodecUtil.checkIndexHeader(
- input,
- Lucene90FieldInfosFormat.CODEC_NAME,
- Lucene90FieldInfosFormat.FORMAT_START,
- Lucene90FieldInfosFormat.FORMAT_CURRENT,
- segmentInfo.getId(),
- segmentSuffix);
+ int version =
+ CodecUtil.checkIndexHeader(
+ input,
+ Lucene90FieldInfosFormat.CODEC_NAME,
+ Lucene90FieldInfosFormat.FORMAT_START,
+ Lucene90FieldInfosFormat.FORMAT_CURRENT,
+ segmentInfo.getId(),
+ segmentSuffix);
final int size = input.readVInt(); // read in the size
infos = new FieldInfo[size];
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorReader.java
index 4dc25cb..58fc919 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/Lucene90VectorReader.java
@@ -21,6 +21,7 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.nio.FloatBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
@@ -323,6 +324,7 @@ public final class Lucene90VectorReader extends VectorReader {
final BytesRef binaryValue;
final ByteBuffer byteBuffer;
+ final FloatBuffer floatBuffer;
final int byteSize;
final float[] value;
@@ -334,6 +336,7 @@ public final class Lucene90VectorReader extends VectorReader {
this.dataIn = dataIn;
byteSize = Float.BYTES * fieldEntry.dimension;
byteBuffer = ByteBuffer.allocate(byteSize);
+ floatBuffer = byteBuffer.asFloatBuffer();
value = new float[fieldEntry.dimension];
binaryValue = new BytesRef(byteBuffer.array(), byteBuffer.arrayOffset(), byteSize);
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/IntersectTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/IntersectTermsEnumFrame.java
index eb60d7f..ff7e58b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/IntersectTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/IntersectTermsEnumFrame.java
@@ -97,12 +97,14 @@ final class IntersectTermsEnumFrame {
int suffix;
private final IntersectTermsEnum ite;
+ private final int version;
public IntersectTermsEnumFrame(IntersectTermsEnum ite, int ord) throws IOException {
this.ite = ite;
this.ord = ord;
this.termState = ite.fr.parent.postingsReader.newTermState();
this.termState.totalTermFreq = -1;
+ this.version = ite.fr.parent.version;
suffixLengthBytes = new byte[32];
suffixLengthsReader = new ByteArrayDataInput();
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsReader.java
index 05549f8..9e9e3a8 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsReader.java
@@ -271,6 +271,13 @@ public final class Lucene90BlockTreeTermsReader extends FieldsProducer {
return bytes;
}
+ /** Seek {@code input} to the directory offset. */
+ private static void seekDir(IndexInput input) throws IOException {
+ input.seek(input.length() - CodecUtil.footerLength() - 8);
+ long offset = input.readLong();
+ input.seek(offset);
+ }
+
// for debugging
// private static String toHex(int v) {
// return "0x" + Integer.toHexString(v);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/SegmentTermsEnumFrame.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/SegmentTermsEnumFrame.java
index 48c4fd0..8c742bd 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/SegmentTermsEnumFrame.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/blocktree/SegmentTermsEnumFrame.java
@@ -94,12 +94,14 @@ final class SegmentTermsEnumFrame {
final ByteArrayDataInput bytesReader = new ByteArrayDataInput();
private final SegmentTermsEnum ste;
+ private final int version;
public SegmentTermsEnumFrame(SegmentTermsEnum ste, int ord) throws IOException {
this.ste = ste;
this.ord = ord;
this.state = ste.fr.parent.postingsReader.newTermState();
this.state.totalTermFreq = -1;
+ this.version = ste.fr.parent.version;
suffixLengthBytes = new byte[32];
suffixLengthsReader = new ByteArrayDataInput();
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingTermVectorsReader.java
index 7aca22b..6d501ec 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene90/compressing/Lucene90CompressingTermVectorsReader.java
@@ -29,6 +29,7 @@ import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingT
import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingTermVectorsWriter.VERSION_CURRENT;
import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingTermVectorsWriter.VERSION_START;
+import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
@@ -70,7 +71,8 @@ import org.apache.lucene.util.packed.PackedInts;
*
* @lucene.experimental
*/
-public final class Lucene90CompressingTermVectorsReader extends TermVectorsReader {
+public final class Lucene90CompressingTermVectorsReader extends TermVectorsReader
+ implements Closeable {
private final FieldInfos fieldInfos;
final FieldsIndex indexReader;
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 8ce1cfe..cb946ea 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -64,6 +64,8 @@ import org.apache.lucene.util.LongBitSet;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.SuppressForbidden;
import org.apache.lucene.util.Version;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
* Basic tool and API to check the health of an index and write a new segments file that removes
@@ -1093,6 +1095,171 @@ public final class CheckIndex implements Closeable {
}
/**
+ * Visits all terms in the range minTerm (inclusive) to maxTerm (exclusive), marking all doc IDs
+ * encountered into allDocsSeen, and returning the total number of terms visited.
+ */
+ private static long getDocsFromTermRange(
+ String field,
+ int maxDoc,
+ TermsEnum termsEnum,
+ FixedBitSet docsSeen,
+ BytesRef minTerm,
+ BytesRef maxTerm,
+ boolean isIntersect)
+ throws IOException {
+ docsSeen.clear(0, docsSeen.length());
+
+ long termCount = 0;
+ PostingsEnum postingsEnum = null;
+ BytesRefBuilder lastTerm = null;
+ while (true) {
+ BytesRef term;
+
+ // Kinda messy: for intersect, we must first next(), but for "normal", we are already on our
+ // first term:
+ if (isIntersect || termCount != 0) {
+ term = termsEnum.next();
+ } else {
+ term = termsEnum.term();
+ }
+
+ if (term == null) {
+ if (isIntersect == false) {
+ throw new RuntimeException("didn't see max term field=" + field + " term=" + maxTerm);
+ }
+ // System.out.println(" terms=" + termCount);
+ return termCount;
+ }
+
+ assert term.isValid();
+
+ if (lastTerm == null) {
+ lastTerm = new BytesRefBuilder();
+ lastTerm.copyBytes(term);
+ } else {
+ if (lastTerm.get().compareTo(term) >= 0) {
+ throw new RuntimeException(
+ "terms out of order: lastTerm=" + lastTerm.get() + " term=" + term);
+ }
+ lastTerm.copyBytes(term);
+ }
+
+ // System.out.println(" term=" + term);
+
+ // Caller already ensured terms enum positioned >= minTerm:
+ if (term.compareTo(minTerm) < 0) {
+ throw new RuntimeException("saw term before min term field=" + field + " term=" + minTerm);
+ }
+
+ if (isIntersect == false) {
+ int cmp = term.compareTo(maxTerm);
+ if (cmp == 0) {
+ // Done!
+ // System.out.println(" terms=" + termCount);
+ return termCount;
+ } else if (cmp > 0) {
+ throw new RuntimeException("didn't see end term field=" + field + " term=" + maxTerm);
+ }
+ }
+
+ postingsEnum = termsEnum.postings(postingsEnum, 0);
+
+ int lastDoc = -1;
+ while (true) {
+ int doc = postingsEnum.nextDoc();
+ if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+ break;
+ }
+ if (doc <= lastDoc) {
+ throw new RuntimeException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
+ }
+ if (doc >= maxDoc) {
+ throw new RuntimeException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
+ }
+
+ // System.out.println(" doc=" + doc);
+ docsSeen.set(doc);
+
+ lastDoc = doc;
+ }
+
+ termCount++;
+ }
+ }
+
+ /**
+ * Test Terms.intersect on this range, and validates that it returns the same doc ids as using
+ * non-intersect TermsEnum. Returns true if any fake terms were seen.
+ */
+ private static boolean checkSingleTermRange(
+ String field,
+ int maxDoc,
+ Terms terms,
+ BytesRef minTerm,
+ BytesRef maxTerm,
+ FixedBitSet normalDocs,
+ FixedBitSet intersectDocs)
+ throws IOException {
+ // System.out.println(" check minTerm=" + minTerm.utf8ToString() + " maxTerm=" +
+ // maxTerm.utf8ToString());
+ assert minTerm.compareTo(maxTerm) <= 0;
+
+ TermsEnum termsEnum = terms.iterator();
+ TermsEnum.SeekStatus status = termsEnum.seekCeil(minTerm);
+ if (status != TermsEnum.SeekStatus.FOUND) {
+ throw new RuntimeException(
+ "failed to seek to existing term field=" + field + " term=" + minTerm);
+ }
+
+ // Do "dumb" iteration to visit all terms in the range:
+ long normalTermCount =
+ getDocsFromTermRange(field, maxDoc, termsEnum, normalDocs, minTerm, maxTerm, false);
+
+ // Now do the same operation using intersect:
+ long intersectTermCount =
+ getDocsFromTermRange(
+ field,
+ maxDoc,
+ terms.intersect(
+ new CompiledAutomaton(
+ Automata.makeBinaryInterval(minTerm, true, maxTerm, false),
+ true,
+ false,
+ Integer.MAX_VALUE,
+ true),
+ null),
+ intersectDocs,
+ minTerm,
+ maxTerm,
+ true);
+
+ if (intersectTermCount > normalTermCount) {
+ throw new RuntimeException(
+ "intersect returned too many terms: field="
+ + field
+ + " intersectTermCount="
+ + intersectTermCount
+ + " normalTermCount="
+ + normalTermCount);
+ }
+
+ if (normalDocs.equals(intersectDocs) == false) {
+ throw new RuntimeException(
+ "intersect visited different docs than straight terms enum: "
+ + normalDocs.cardinality()
+ + " for straight enum, vs "
+ + intersectDocs.cardinality()
+ + " for intersect, minTerm="
+ + minTerm
+ + " maxTerm="
+ + maxTerm);
+ }
+ // System.out.println(" docs=" + normalTermCount);
+ // System.out.println(" " + intersectTermCount + " vs " + normalTermCount);
+ return intersectTermCount != normalTermCount;
+ }
+
+ /**
* checks Fields api is consistent with itself. searcher is optional, to verify with queries. Can
* be null.
*/
@@ -2386,6 +2553,7 @@ public final class CheckIndex implements Closeable {
public static class VerifyPointsVisitor implements PointValues.IntersectVisitor {
private long pointCountSeen;
private int lastDocID = -1;
+ private final int maxDoc;
private final FixedBitSet docsSeen;
private final byte[] lastMinPackedValue;
private final byte[] lastMaxPackedValue;
@@ -2402,6 +2570,7 @@ public final class CheckIndex implements Closeable {
/** Sole constructor */
public VerifyPointsVisitor(String fieldName, int maxDoc, PointValues values)
throws IOException {
+ this.maxDoc = maxDoc;
this.fieldName = fieldName;
numDataDims = values.getNumDimensions();
numIndexDims = values.getNumIndexDimensions();
diff --git a/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java b/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java
index ae92991..558449f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java
+++ b/lucene/core/src/java/org/apache/lucene/index/OrdinalMap.java
@@ -49,6 +49,7 @@ public class OrdinalMap implements Accountable {
// TODO: use more efficient packed ints structures?
private static class TermsEnumIndex {
+ public static final TermsEnumIndex[] EMPTY_ARRAY = new TermsEnumIndex[0];
final int subIndex;
final TermsEnum termsEnum;
BytesRef currentTerm;
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index b6382c7..f410887 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -35,6 +35,8 @@ public class SpanScorer extends Scorer {
/** accumulated sloppy freq (computed in setFreqCurrentDoc) */
private float freq;
+ /** number of matches (computed in setFreqCurrentDoc) */
+ private int numMatches;
private int lastScoredDoc = -1; // last doc we called setFreqCurrentDoc() for
@@ -75,12 +77,13 @@ public class SpanScorer extends Scorer {
}
/**
- * Sets {@link #freq} for the current document.
+ * Sets {@link #freq} and {@link #numMatches} for the current document.
*
* <p>This will be called at most once per document.
*/
protected final void setFreqCurrentDoc() throws IOException {
freq = 0.0f;
+ numMatches = 0;
spans.doStartCurrentDoc();
@@ -99,6 +102,7 @@ public class SpanScorer extends Scorer {
// assert (startPos != prevStartPos) || (endPos > prevEndPos) : "non increased
// endPos="+endPos;
assert (startPos != prevStartPos) || (endPos >= prevEndPos) : "decreased endPos=" + endPos;
+ numMatches++;
if (docScorer == null) { // scores not required, break out here
freq = 1;
return;
diff --git a/lucene/core/src/java/org/apache/lucene/store/IndexInput.java b/lucene/core/src/java/org/apache/lucene/store/IndexInput.java
index 3d3d90b..10f4a37 100644
--- a/lucene/core/src/java/org/apache/lucene/store/IndexInput.java
+++ b/lucene/core/src/java/org/apache/lucene/store/IndexInput.java
@@ -37,7 +37,7 @@ import java.io.IOException;
*
* @see Directory
*/
-public abstract class IndexInput extends DataInput implements Closeable {
+public abstract class IndexInput extends DataInput implements Cloneable, Closeable {
private final String resourceDescription;
diff --git a/lucene/core/src/java/org/apache/lucene/util/FixedBitSet.java b/lucene/core/src/java/org/apache/lucene/util/FixedBitSet.java
index 707ee76..7b79ece 100644
--- a/lucene/core/src/java/org/apache/lucene/util/FixedBitSet.java
+++ b/lucene/core/src/java/org/apache/lucene/util/FixedBitSet.java
@@ -28,7 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator;
*
* @lucene.internal
*/
-public final class FixedBitSet extends BitSet {
+public final class FixedBitSet extends BitSet implements Bits, Accountable {
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(FixedBitSet.class);
diff --git a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java
index 049d5af..32baa6c 100644
--- a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java
+++ b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java
@@ -374,6 +374,7 @@ public class OfflineSorter {
/** Merge the most recent {@code maxTempFile} partitions into a new partition. */
void mergePartitions(Directory trackingDir, List<Future<Partition>> segments) throws IOException {
+ long start = System.currentTimeMillis();
List<Future<Partition>> segmentsToMerge;
if (segments.size() > maxTempFiles) {
segmentsToMerge = segments.subList(segments.size() - maxTempFiles, segments.size());
@@ -428,6 +429,7 @@ public class OfflineSorter {
long start = System.currentTimeMillis();
SortableBytesRefArray buffer;
boolean exhausted = false;
+ int count;
if (valueLength != -1) {
// fixed length case
buffer = new FixedLengthBytesRefArray(valueLength);
diff --git a/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java b/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
index 2a1762a..2952ee2 100644
--- a/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
+++ b/lucene/core/src/java/org/apache/lucene/util/SparseFixedBitSet.java
@@ -33,7 +33,7 @@ import org.apache.lucene.search.DocIdSetIterator;
*
* @lucene.internal
*/
-public class SparseFixedBitSet extends BitSet {
+public class SparseFixedBitSet extends BitSet implements Bits, Accountable {
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(SparseFixedBitSet.class);
diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/FST.java b/lucene/core/src/java/org/apache/lucene/util/fst/FST.java
index 6857f90..bd00302 100644
--- a/lucene/core/src/java/org/apache/lucene/util/fst/FST.java
+++ b/lucene/core/src/java/org/apache/lucene/util/fst/FST.java
@@ -71,6 +71,8 @@ public final class FST<T> implements Accountable {
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(FST.class);
+ private static final long ARC_SHALLOW_RAM_BYTES_USED =
+ RamUsageEstimator.shallowSizeOfInstance(Arc.class);
private static final int BIT_FINAL_ARC = 1 << 0;
static final int BIT_LAST_ARC = 1 << 1;
diff --git a/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraph.java b/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraph.java
index 2def856..9f872ff 100644
--- a/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraph.java
+++ b/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraph.java
@@ -59,6 +59,7 @@ import org.apache.lucene.util.SparseFixedBitSet;
public final class HnswGraph extends KnnGraphValues {
private final int maxConn;
+ private final VectorValues.SearchStrategy searchStrategy;
// Each entry lists the top maxConn neighbors of a node. The nodes correspond to vectors added to
// HnswBuilder, and the
@@ -69,12 +70,13 @@ public final class HnswGraph extends KnnGraphValues {
private int upto;
private NeighborArray cur;
- HnswGraph(int maxConn) {
+ HnswGraph(int maxConn, VectorValues.SearchStrategy searchStrategy) {
graph = new ArrayList<>();
// Typically with diversity criteria we see nodes not fully occupied; average fanout seems to be
// about 1/2 maxConn. There is some indexing time penalty for under-allocating, but saves RAM
graph.add(new NeighborArray(Math.max(32, maxConn / 4)));
this.maxConn = maxConn;
+ this.searchStrategy = searchStrategy;
}
/**
diff --git a/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraphBuilder.java b/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraphBuilder.java
index 4dc4b15..f51a5df 100644
--- a/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraphBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/hnsw/HnswGraphBuilder.java
@@ -99,7 +99,7 @@ public final class HnswGraphBuilder {
}
this.maxConn = maxConn;
this.beamWidth = beamWidth;
- this.hnsw = new HnswGraph(maxConn);
+ this.hnsw = new HnswGraph(maxConn, searchStrategy);
bound = BoundsChecker.create(searchStrategy.reversed);
random = new Random(seed);
scratch = new NeighborArray(Math.max(beamWidth, maxConn + 1));
diff --git a/lucene/core/src/java/org/apache/lucene/util/hnsw/NeighborArray.java b/lucene/core/src/java/org/apache/lucene/util/hnsw/NeighborArray.java
index 9deaa64..b026d6c 100644
--- a/lucene/core/src/java/org/apache/lucene/util/hnsw/NeighborArray.java
+++ b/lucene/core/src/java/org/apache/lucene/util/hnsw/NeighborArray.java
@@ -28,6 +28,7 @@ import org.apache.lucene.util.ArrayUtil;
public class NeighborArray {
private int size;
+ private int upto;
float[] score;
int[] node;
diff --git a/lucene/core/src/java/org/apache/lucene/util/packed/gen_BulkOperation.py b/lucene/core/src/java/org/apache/lucene/util/packed/gen_BulkOperation.py
index ddb79cb..16ed30a 100644
--- a/lucene/core/src/java/org/apache/lucene/util/packed/gen_BulkOperation.py
+++ b/lucene/core/src/java/org/apache/lucene/util/packed/gen_BulkOperation.py
@@ -15,12 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-try:
- # python 3.9+
- from math import gcd
-except ImportError:
- # old python
- from fractions import gcd
+from fractions import gcd
"""Code generation for bulk operations"""
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestStopFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestStopFilter.java
index 13b2ea9..0172e67 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestStopFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestStopFilter.java
@@ -191,7 +191,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
StopFilter stopfilter, List<Integer> stopwordPositions, final int numberOfTokens)
throws IOException {
CharTermAttribute termAtt = stopfilter.getAttribute(CharTermAttribute.class);
- stopfilter.getAttribute(PositionIncrementAttribute.class);
+ PositionIncrementAttribute posIncrAtt =
+ stopfilter.getAttribute(PositionIncrementAttribute.class);
stopfilter.reset();
log("Test stopwords positions:");
for (int i = 0; i < numberOfTokens; i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestIndexedDISI.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestIndexedDISI.java
index 8c21c74..97f22bd 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestIndexedDISI.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene90/TestIndexedDISI.java
@@ -98,6 +98,7 @@ public class TestIndexedDISI extends LuceneTestCase {
private void assertAdvanceBeyondEnd(BitSet set, Directory dir) throws IOException {
final int cardinality = set.cardinality();
final byte denseRankPower = 9; // Not tested here so fixed to isolate factors
+ long length;
int jumpTableentryCount;
try (IndexOutput out = dir.createOutput("bar", IOContext.DEFAULT)) {
jumpTableentryCount =
@@ -434,7 +435,9 @@ public class TestIndexedDISI extends LuceneTestCase {
length = out.getFilePointer();
}
try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) {
- new IndexedDISI(in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
+ IndexedDISI disi =
+ new IndexedDISI(
+ in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
}
// This tests the legality of the denseRankPower only, so we don't do anything with the disi
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
index a91695c..eb25138 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValuesOrds.java
@@ -84,6 +84,7 @@ public class Test2BSortedDocValuesOrds extends LuceneTestCase {
int counter = 0;
for (LeafReaderContext context : r.leaves()) {
LeafReader reader = context.reader();
+ BytesRef scratch = new BytesRef();
BinaryDocValues dv = DocValues.getBinary(reader, "dv");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(i, dv.nextDoc());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 2ad2d60..7994171 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -341,11 +341,13 @@ public class TestCodecs extends LuceneTestCase {
private static class Verify extends Thread {
final Fields termsDict;
final FieldData[] fields;
+ final SegmentInfo si;
volatile boolean failed;
Verify(final SegmentInfo si, final FieldData[] fields, final Fields termsDict) {
this.fields = fields;
this.termsDict = termsDict;
+ this.si = si;
}
@Override
@@ -375,6 +377,8 @@ public class TestCodecs extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
+ byte[] data = new byte[10];
+
private void verifyPositions(final PositionData[] positions, final PostingsEnum posEnum)
throws Throwable {
for (int i = 0; i < positions.length; i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
index 37b5388..b2732df 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
@@ -95,6 +95,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
public final ReaderManager mgr;
private final Directory indexDir;
+ private final Path root;
private final Path segsPath;
/** Which segments have been closed, but their parallel index is not yet not removed. */
@@ -118,6 +119,8 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
}
public ReindexingReader(Path root) throws IOException {
+ this.root = root;
+
// Normal index is stored under "index":
indexDir = openDirectory(root.resolve("index"));
@@ -866,7 +869,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
for (int i = 0; i < maxDoc; i++) {
// TODO: is this still O(blockSize^2)?
assertEquals(i, oldValues.nextDoc());
- reader.document(i);
+ Document oldDoc = reader.document(i);
Document newDoc = new Document();
newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, oldValues.longValue()));
w.addDocument(newDoc);
@@ -993,7 +996,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
for (int i = 0; i < maxDoc; i++) {
// TODO: is this still O(blockSize^2)?
- reader.document(i);
+ Document oldDoc = reader.document(i);
Document newDoc = new Document();
assertEquals(i, oldValues.nextDoc());
newDoc.add(
@@ -1515,6 +1518,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName);
int maxDoc = r.maxDoc();
boolean failed = false;
+ long t0 = System.currentTimeMillis();
for (int i = 0; i < maxDoc; i++) {
Document oldDoc = r.document(i);
long value = multiplier * Long.parseLong(oldDoc.get("text").split(" ")[1]);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index ae635a3..d062ee7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -238,7 +238,7 @@ public class TestDoc extends LuceneTestCase {
new FieldInfos.FieldNumbers(null),
context);
- merger.merge();
+ MergeState mergeState = merger.merge();
r1.close();
r2.close();
si.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
index 4bce7ce..deb938d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexSorting.java
@@ -1720,6 +1720,7 @@ public class TestIndexSorting extends LuceneTestCase {
}
public void testRandom1() throws IOException {
+ boolean withDeletes = random().nextBoolean();
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
Sort indexSort = new Sort(new SortField("foo", SortField.Type.LONG));
@@ -1790,6 +1791,7 @@ public class TestIndexSorting extends LuceneTestCase {
}
public void testMultiValuedRandom1() throws IOException {
+ boolean withDeletes = random().nextBoolean();
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
Sort indexSort = new Sort(new SortedNumericSortField("foo", SortField.Type.LONG));
@@ -2410,6 +2412,7 @@ public class TestIndexSorting extends LuceneTestCase {
}
private static final class RandomDoc {
+ public final int id;
public final int intValue;
public final int[] intValues;
public final long longValue;
@@ -2422,6 +2425,7 @@ public class TestIndexSorting extends LuceneTestCase {
public final byte[][] bytesValues;
public RandomDoc(int id) {
+ this.id = id;
intValue = random().nextInt();
longValue = random().nextLong();
floatValue = random().nextFloat();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index bd496ad..1ee5813 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -2967,6 +2967,7 @@ public class TestIndexWriter extends LuceneTestCase {
// Use WindowsFS to prevent open files from being deleted:
FileSystem fs = new WindowsFS(path.getFileSystem()).getFileSystem(URI.create("file:///"));
Path root = new FilterPath(path, fs);
+ DirectoryReader reader;
// MMapDirectory doesn't work because it closes its file handles after mapping!
try (FSDirectory dir = new NIOFSDirectory(root)) {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
index c18d5fc..489a8c5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
@@ -182,6 +182,8 @@ public class TestIndexWriterCommit extends LuceneTestCase {
// sum because the merged FST may use array encoding for
// some arcs (which uses more space):
+ final String idFormat = TestUtil.getPostingsFormat("id");
+ final String contentFormat = TestUtil.getPostingsFormat("content");
MockDirectoryWrapper dir = newMockDirectory();
Analyzer analyzer;
if (random().nextBoolean()) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index a9c6d87..dcb605c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -1430,7 +1430,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
w.close();
IndexReader reader = DirectoryReader.open(dir);
assertTrue(reader.numDocs() > 0);
- SegmentInfos.readLatestCommit(dir);
+ SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
for (LeafReaderContext context : reader.leaves()) {
assertFalse(context.reader().getFieldInfos().hasVectors());
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
index bec09c5..8f1d114 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
@@ -159,6 +159,9 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// sum because the merged FST may use array encoding for
// some arcs (which uses more space):
+ final String idFormat = TestUtil.getPostingsFormat("id");
+ final String contentFormat = TestUtil.getPostingsFormat("content");
+
int START_COUNT = 57;
int NUM_DIR = TEST_NIGHTLY ? 50 : 5;
int END_COUNT = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 4fddce3..710f2b0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -424,6 +424,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
IndexWriter mainWriter;
final List<Throwable> failures = new ArrayList<>();
DirectoryReader[] readers;
+ boolean didClose = false;
AtomicInteger count = new AtomicInteger(0);
AtomicInteger numaddIndexes = new AtomicInteger(0);
@@ -459,6 +460,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
}
void close(boolean doWait) throws Throwable {
+ didClose = true;
if (doWait) {
mainWriter.close();
} else {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
index b989e8c..102fcc3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterThreadsToSegments.java
@@ -112,6 +112,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
private static final int MAX_THREADS_AT_ONCE = 10;
static class CheckSegmentCount implements Runnable, Closeable {
+ private final IndexWriter w;
private final AtomicInteger maxThreadCountPerIter;
private final AtomicInteger indexingCount;
private DirectoryReader r;
@@ -119,6 +120,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
public CheckSegmentCount(
IndexWriter w, AtomicInteger maxThreadCountPerIter, AtomicInteger indexingCount)
throws IOException {
+ this.w = w;
this.maxThreadCountPerIter = maxThreadCountPerIter;
this.indexingCount = indexingCount;
r = DirectoryReader.open(w);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index fb4052b..2628ed1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -54,6 +54,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
private static class IndexerThread extends Thread {
private final CyclicBarrier syncStart;
+ boolean diskFull;
Throwable error;
IndexWriter writer;
boolean noErrors;
@@ -99,6 +100,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// ioe.printStackTrace(System.out);
if (ioe.getMessage().startsWith("fake disk full at")
|| ioe.getMessage().equals("now failing on purpose")) {
+ diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
index ed6d9aa..0d54180 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
@@ -138,6 +138,9 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
writer.addDocument(doc(i, val));
}
+ int numDocUpdates = 0;
+ int numValueUpdates = 0;
+
for (int i = 0; i < numOperations; i++) {
final int op = TestUtil.nextInt(random(), 1, 100);
final long val = random().nextLong();
@@ -149,8 +152,10 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
final int id = TestUtil.nextInt(random(), 0, expected.size() - 1);
expected.put(id, val);
if (op <= UPD_CUTOFF) {
+ numDocUpdates++;
writer.updateDocument(new Term("id", "doc-" + id), doc(id, val));
} else {
+ numValueUpdates++;
writer.updateNumericDocValue(new Term("id", "doc-" + id), "val", val);
}
}
@@ -827,6 +832,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
int refreshChance = TestUtil.nextInt(random(), 5, 200);
int deleteChance = TestUtil.nextInt(random(), 2, 100);
+ int idUpto = 0;
int deletedCount = 0;
List<OneSortDoc> docs = new ArrayList<>();
@@ -1594,6 +1600,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
// update all doc values
long value = random().nextInt();
+ NumericDocValuesField[] update = new NumericDocValuesField[numDocs];
for (int i = 0; i < numDocs; i++) {
Term term = new Term("id", new BytesRef(Integer.toString(i)));
writer.updateDocValues(term, new NumericDocValuesField("ndv", value));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index ec25b96..7ae8908 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -365,6 +365,11 @@ public class TestPayloads extends LuceneTestCase {
super(PER_FIELD_REUSE_STRATEGY);
}
+ public PayloadAnalyzer(String field, byte[] data, int offset, int length) {
+ super(PER_FIELD_REUSE_STRATEGY);
+ setPayloadData(field, data, offset, length);
+ }
+
void setPayloadData(String field, byte[] data, int offset, int length) {
fieldToData.put(field, new PayloadData(data, offset, length));
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
index c65b1f3..d1c2395 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing.java
@@ -19,12 +19,14 @@ package org.apache.lucene.index;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
public class TestStressIndexing extends LuceneTestCase {
private abstract static class TimedThread extends Thread {
volatile boolean failed;
+ int count;
private static int RUN_TIME_MSEC = atLeast(1000);
private TimedThread[] allThreads;
@@ -38,10 +40,13 @@ public class TestStressIndexing extends LuceneTestCase {
public void run() {
final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
+ count = 0;
+
try {
do {
if (anyErrors()) break;
doWork();
+ count++;
} while (System.currentTimeMillis() < stopTime);
} catch (Throwable e) {
System.out.println(Thread.currentThread() + ": exc");
@@ -98,9 +103,10 @@ public class TestStressIndexing extends LuceneTestCase {
public void doWork() throws Throwable {
for (int i = 0; i < 100; i++) {
IndexReader ir = DirectoryReader.open(directory);
- newSearcher(ir);
+ IndexSearcher is = newSearcher(ir);
ir.close();
}
+ count += 100;
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTerm.java b/lucene/core/src/test/org/apache/lucene/index/TestTerm.java
index 1c2b3db..438f414 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTerm.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTerm.java
@@ -20,7 +20,6 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestTerm extends LuceneTestCase {
- @SuppressWarnings("unlikely-arg-type")
public void testEquals() {
final Term base = new Term("same", "same");
final Term same = new Term("same", "same");
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index 3ab1d5b..3feeec5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -249,6 +249,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
Codec.getDefault()
.termVectorsFormat()
.vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
+ BytesRef[] terms;
Terms vector = reader.get(0).terms(testFields[0]);
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
index 823dbb9..971cf4c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java
@@ -263,7 +263,7 @@ public class TestTermsHashPerField extends LuceneTestCase {
for (int i = 0; i < numDocs; i++) {
int numTerms = 1 + random().nextInt(200);
int doc = i;
- for (int j = 0; j < numTerms; j++) {
+ for (int j = 0; i < numTerms; i++) {
BytesRef ref = RandomPicks.randomFrom(random(), bytesRefs);
Posting posting = postingMap.get(ref);
if (posting.termId == -1) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTwoPhaseCommitTool.java b/lucene/core/src/test/org/apache/lucene/index/TestTwoPhaseCommitTool.java
index e8f7615..f477715 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTwoPhaseCommitTool.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTwoPhaseCommitTool.java
@@ -28,6 +28,8 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
final boolean failOnCommit;
final boolean failOnRollback;
boolean rollbackCalled = false;
+ Map<String, String> prepareCommitData = null;
+ Map<String, String> commitData = null;
public TwoPhaseCommitImpl(boolean failOnPrepare, boolean failOnCommit, boolean failOnRollback) {
this.failOnPrepare = failOnPrepare;
@@ -41,6 +43,7 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
}
public long prepareCommit(Map<String, String> commitData) throws IOException {
+ this.prepareCommitData = commitData;
assertFalse("commit should not have been called before all prepareCommit were", commitCalled);
if (failOnPrepare) {
throw new IOException("failOnPrepare");
@@ -54,6 +57,7 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
}
public long commit(Map<String, String> commitData) throws IOException {
+ this.commitData = commitData;
commitCalled = true;
if (failOnCommit) {
throw new RuntimeException("failOnCommit");
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
index 5181c67..8e0136d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -356,6 +356,8 @@ public class TestBoolean2 extends LuceneTestCase {
public void testRandomQueries() throws Exception {
String[] vals = {"w1", "w2", "w3", "w4", "w5", "xx", "yy", "zzz"};
+ int tot = 0;
+
BooleanQuery q1 = null;
try {
@@ -393,6 +395,7 @@ public class TestBoolean2 extends LuceneTestCase {
collector = TopFieldCollector.create(sort, 1000, 1);
searcher.search(q1, collector);
ScoreDoc[] hits2 = collector.topDocs().scoreDocs;
+ tot += hits2.length;
CheckHits.checkEqual(q1, hits1, hits2);
BooleanQuery.Builder q3 = new BooleanQuery.Builder();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
index c1882e0..9dcdcd8 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
@@ -585,7 +585,7 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc
nrtDeletesThread.setDaemon(true);
nrtDeletesThread.start();
- w.addDocument(new Document());
+ long gen1 = w.addDocument(new Document());
long gen2 = w.deleteAll();
nrtDeletesThread.waitForGeneration(gen2);
IOUtils.close(nrtDeletesThread, nrtDeletes, w, dir);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java b/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
index be8ce3c..4d19a82 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLongValuesSource.java
@@ -46,6 +46,7 @@ public class TestLongValuesSource extends LuceneTestCase {
dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
int numDocs = TestUtil.nextInt(random(), 2049, 4000);
+ int leastValue = 45;
for (int i = 0; i < numDocs; i++) {
Document document = new Document();
document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMatchesIterator.java b/lucene/core/src/test/org/apache/lucene/search/TestMatchesIterator.java
index 4c4222e..c8888be 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMatchesIterator.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMatchesIterator.java
@@ -646,6 +646,8 @@ public class TestMatchesIterator extends LuceneTestCase {
// "a phrase sentence with many phrase sentence iterations of a phrase sentence",
public void testSloppyPhraseQueryWithRepeats() throws IOException {
+ Term p = new Term(FIELD_WITH_OFFSETS, "phrase");
+ Term s = new Term(FIELD_WITH_OFFSETS, "sentence");
PhraseQuery pq = new PhraseQuery(10, FIELD_WITH_OFFSETS, "phrase", "sentence", "sentence");
checkMatches(
pq,
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
index b6cac82..d056886 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedNumericSortField.java
@@ -47,7 +47,6 @@ public class TestSortedNumericSortField extends LuceneTestCase {
}
}
- @SuppressWarnings("unlikely-arg-type")
public void testEquals() throws Exception {
SortField sf = new SortedNumericSortField("a", SortField.Type.LONG);
assertFalse(sf.equals(null));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
index b338d5c..66e8be6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSortedSetSortField.java
@@ -47,7 +47,6 @@ public class TestSortedSetSortField extends LuceneTestCase {
}
}
- @SuppressWarnings("unlikely-arg-type")
public void testEquals() throws Exception {
SortField sf = new SortedSetSortField("a", false);
assertFalse(sf.equals(null));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
index 32437a1..cd7f35c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
@@ -126,6 +126,9 @@ public class TestTermQuery extends LuceneTestCase {
w.addDocument(new Document());
DirectoryReader reader = w.getReader();
+ FilterDirectoryReader noSeekReader = new NoSeekDirectoryReader(reader);
+ IndexSearcher noSeekSearcher = new IndexSearcher(noSeekReader);
+ Query query = new TermQuery(new Term("foo", "bar"));
TermQuery queryWithContext =
new TermQuery(
new Term("foo", "bar"),
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTotalHits.java b/lucene/core/src/test/org/apache/lucene/search/TestTotalHits.java
index 4bd74bf..f975f41 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTotalHits.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTotalHits.java
@@ -22,7 +22,6 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestTotalHits extends LuceneTestCase {
- @SuppressWarnings("unlikely-arg-type")
public void testEqualsAndHashcode() {
TotalHits totalHits1 = randomTotalHits();
assertFalse(totalHits1.equals(null));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
index b891826..69df652 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
@@ -32,7 +32,6 @@ import org.apache.lucene.util.LuceneTestCase;
/** TestWildcard tests the '*' and '?' wildcard characters. */
public class TestWildcard extends LuceneTestCase {
- @SuppressWarnings("unlikely-arg-type")
public void testEquals() {
WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
index e34cf78..b664a45 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestMultiMMap.java
@@ -181,7 +181,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
public void testSeekSliceZero() throws Exception {
int upto = TEST_NIGHTLY ? 31 : 3;
- for (int i = 0; i < upto; i++) {
+ for (int i = 0; i < 3; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceZero"), 1 << i);
IndexOutput io = mmapDir.createOutput("zeroBytes", newIOContext(random()));
io.close();
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
index ee40c3a..d530310 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
@@ -30,6 +30,7 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.TestUtil;
@@ -81,7 +82,7 @@ public class TestNRTCachingDirectory extends BaseDirectoryTestCase {
final IndexSearcher s = newSearcher(r);
// Just make sure search can run; we can't assert
// totHits since it could be 0
- s.search(new TermQuery(new Term("body", "the")), 10);
+ TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);
// System.out.println("tot hits " + hits.totalHits);
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
index 37a6cf6..6e5c600 100644
--- a/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
+++ b/lucene/core/src/test/org/apache/lucene/util/bkd/TestBKD.java
@@ -1723,6 +1723,7 @@ public class TestBKD extends LuceneTestCase {
public void testTooManyPoints() throws Exception {
Directory dir = newDirectory();
final int numValues = 10;
+ final int numPointsAdded = 50; // exceeds totalPointCount
final int numBytesPerDim = TestUtil.nextInt(random(), 1, 4);
final byte[] pointValue = new byte[numBytesPerDim];
BKDWriter w =
@@ -1754,6 +1755,7 @@ public class TestBKD extends LuceneTestCase {
public void testTooManyPoints1D() throws Exception {
Directory dir = newDirectory();
final int numValues = 10;
+ final int numPointsAdded = 50; // exceeds totalPointCount
final int numBytesPerDim = TestUtil.nextInt(random(), 1, 4);
final byte[][] pointValue = new byte[11][numBytesPerDim];
BKDWriter w =
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index 036b7b0..a437f06 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -294,6 +294,7 @@ public class TestFSTs extends LuceneTestCase {
for (int inputMode = 0; inputMode < 2; inputMode++) {
final int numWords = random.nextInt(maxNumWords + 1);
Set<IntsRef> termsSet = new HashSet<>();
+ IntsRef[] terms = new IntsRef[numWords];
while (termsSet.size() < numWords) {
final String term = getRandomString(random);
termsSet.add(toIntsRef(term, inputMode));
@@ -526,7 +527,7 @@ public class TestFSTs extends LuceneTestCase {
BufferedReader is = Files.newBufferedReader(wordsFileIn, StandardCharsets.UTF_8);
try {
- final IntsRefBuilder intsRefBuilder = new IntsRefBuilder();
+ final IntsRefBuilder intsRef = new IntsRefBuilder();
long tStart = System.currentTimeMillis();
int ord = 0;
while (true) {
@@ -534,8 +535,8 @@ public class TestFSTs extends LuceneTestCase {
if (w == null) {
break;
}
- toIntsRef(w, inputMode, intsRefBuilder);
- fstCompiler.add(intsRefBuilder.get(), getOutput(intsRefBuilder.get(), ord));
+ toIntsRef(w, inputMode, intsRef);
+ fstCompiler.add(intsRef.get(), getOutput(intsRef.get(), ord));
ord++;
if (ord % 500000 == 0) {
@@ -612,10 +613,10 @@ public class TestFSTs extends LuceneTestCase {
if (w == null) {
break;
}
- toIntsRef(w, inputMode, intsRefBuilder);
+ toIntsRef(w, inputMode, intsRef);
if (iter == 0) {
- T expected = getOutput(intsRefBuilder.get(), ord);
- T actual = Util.get(fst, intsRefBuilder.get());
+ T expected = getOutput(intsRef.get(), ord);
+ T actual = Util.get(fst, intsRef.get());
if (actual == null) {
throw new RuntimeException("unexpected null output on input=" + w);
}
@@ -630,18 +631,18 @@ public class TestFSTs extends LuceneTestCase {
}
} else {
// Get by output
- final Long output = (Long) getOutput(intsRefBuilder.get(), ord);
+ final Long output = (Long) getOutput(intsRef.get(), ord);
@SuppressWarnings({"unchecked", "deprecation"})
final IntsRef actual = Util.getByOutput((FST<Long>) fst, output.longValue());
if (actual == null) {
throw new RuntimeException("unexpected null input from output=" + output);
}
- if (!actual.equals(intsRefBuilder.get())) {
+ if (!actual.equals(intsRef)) {
throw new RuntimeException(
"wrong input (got "
+ actual
+ " but expected "
- + intsRefBuilder
+ + intsRef
+ " from output="
+ output);
}
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestUtil.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestUtil.java
index 17295c5..d0c66ce 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestUtil.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestUtil.java
@@ -16,6 +16,7 @@
*/
package org.apache.lucene.util.fst;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.lucene.util.BytesRef;
@@ -98,4 +99,26 @@ public class TestUtil extends LuceneTestCase {
}
return fstCompiler.compile();
}
+
+ private List<String> createRandomDictionary(int width, int depth) {
+ return createRandomDictionary(new ArrayList<>(), new StringBuilder(), width, depth);
+ }
+
+ private List<String> createRandomDictionary(
+ List<String> dict, StringBuilder buf, int width, int depth) {
+ char c = (char) random().nextInt(128);
+ assert width < Character.MIN_SURROGATE / 8 - 128; // avoid surrogate chars
+ int len = buf.length();
+ for (int i = 0; i < width; i++) {
+ buf.append(c);
+ if (depth > 0) {
+ createRandomDictionary(dict, buf, width, depth - 1);
+ } else {
+ dict.add(buf.toString());
+ }
+ c += random().nextInt(8);
+ buf.setLength(len);
+ }
+ return dict;
+ }
}
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
index 3634e28..8119881 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
@@ -20,6 +20,7 @@ import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.search.DoubleValues;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Rescorer;
@@ -46,6 +47,21 @@ class ExpressionRescorer extends SortRescorer {
this.bindings = bindings;
}
+ private static DoubleValues scores(int doc, float score) {
+ return new DoubleValues() {
+ @Override
+ public double doubleValue() throws IOException {
+ return score;
+ }
+
+ @Override
+ public boolean advanceExact(int target) throws IOException {
+ assert doc == target;
+ return true;
+ }
+ };
+ }
+
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID)
throws IOException {
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
index d7663d2..5bb7d2e 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
@@ -166,7 +166,7 @@ public final class JavascriptCompiler {
@SuppressWarnings({"unused", "null"})
private static void unusedTestCompile() throws IOException {
DoubleValues f = null;
- f.doubleValue();
+ double ret = f.doubleValue();
}
/**
diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
index 2d60d98..0472b8f 100644
--- a/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
+++ b/lucene/expressions/src/test/org/apache/lucene/expressions/TestExpressionValueSource.java
@@ -94,7 +94,6 @@ public class TestExpressionValueSource extends LuceneTestCase {
assertEquals(4, values.doubleValue(), 0);
}
- @SuppressWarnings("unlikely-arg-type")
public void testDoubleValuesSourceEquals() throws Exception {
Expression expr = JavascriptCompiler.compile("sqrt(a) + ln(b)");
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
index 93ef7e8..df9ef10 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java
@@ -46,7 +46,8 @@ import org.apache.lucene.util.DocIdSetBuilder;
* instantiate one of the {@link Facets} subclasses to do the facet counting. Use the {@code search}
* utility methods to perform an "ordinary" search but also collect into a {@link Collector}.
*/
-public class FacetsCollector extends SimpleCollector {
+// redundant 'implements Collector' to workaround javadocs bugs
+public class FacetsCollector extends SimpleCollector implements Collector {
private LeafReaderContext context;
private Scorable scorer;
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
index 4d4fedd..f4f396e 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java
@@ -441,7 +441,15 @@ public class FacetsConfig {
System.arraycopy(field.assoc.bytes, field.assoc.offset, bytes, upto, field.assoc.length);
upto += field.assoc.length;
+ FacetsConfig.DimConfig ft = getDimConfig(field.dim);
+
// Drill down:
+ int start;
+ if (ft.requireDimensionDrillDown) {
+ start = 1;
+ } else {
+ start = 2;
+ }
for (int i = 1; i <= label.length; i++) {
doc.add(
new StringField(indexFieldName, pathToString(label.components, i), Field.Store.NO));
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/LongValueFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/LongValueFacetCounts.java
index 4379ab7..07853c8 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/LongValueFacetCounts.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/LongValueFacetCounts.java
@@ -218,7 +218,8 @@ public class LongValueFacetCounts extends Facets {
}
private void countAllOneSegment(NumericDocValues values) throws IOException {
- while (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ int doc;
+ while ((doc = values.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
totCount++;
increment(values.longValue());
}
@@ -254,7 +255,8 @@ public class LongValueFacetCounts extends Facets {
if (singleValues != null) {
countAllOneSegment(singleValues);
} else {
- while (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ int doc;
+ while ((doc = values.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
int limit = values.docValueCount();
totCount += limit;
for (int i = 0; i < limit; i++) {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetLabels.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetLabels.java
index f0c66ba..7e12189 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetLabels.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetLabels.java
@@ -31,6 +31,9 @@ import org.apache.lucene.util.IntsRef;
*/
public class TaxonomyFacetLabels {
+ /** Index field name provided to the constructor */
+ private final String indexFieldName;
+
/** {@code TaxonomyReader} provided to the constructor */
private final TaxonomyReader taxoReader;
@@ -46,6 +49,7 @@ public class TaxonomyFacetLabels {
*/
public TaxonomyFacetLabels(TaxonomyReader taxoReader, String indexFieldName) throws IOException {
this.taxoReader = taxoReader;
+ this.indexFieldName = indexFieldName;
this.ordsReader = new DocValuesOrdinalsReader(indexFieldName);
}
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestLongValueFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/TestLongValueFacetCounts.java
index 1c4b5eb..0e17bd2 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/TestLongValueFacetCounts.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/TestLongValueFacetCounts.java
@@ -385,6 +385,7 @@ public class TestLongValueFacetCounts extends LuceneTestCase {
}
long[][] values = new long[valueCount][];
+ int missingCount = 0;
for (int i = 0; i < valueCount; i++) {
Document doc = new Document();
doc.add(new IntPoint("id", i));
@@ -406,6 +407,8 @@ public class TestLongValueFacetCounts extends LuceneTestCase {
}
} else {
+ missingCount++;
+
if (VERBOSE) {
System.out.println(" doc=" + i + " missing values");
}
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java
index f1aa8c3..f6f21d3 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java
@@ -92,7 +92,6 @@ public class TestFacetLabel extends FacetTestCase {
}
}
- @SuppressWarnings("unlikely-arg-type")
@Test
public void testEquals() {
assertEquals(new FacetLabel(), new FacetLabel());
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
index 2bb98f6..ee3972c 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java
@@ -31,14 +31,18 @@ import org.apache.lucene.facet.Facets;
import org.apache.lucene.facet.FacetsCollector;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexNotFoundException;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
@@ -356,4 +360,15 @@ public class TestSearcherTaxonomyManager extends FacetTestCase {
expectThrows(IndexNotFoundException.class, mgr::maybeRefreshBlocking);
IOUtils.close(w, tw, mgr, indexDir, taxoDir);
}
+
+ private SearcherTaxonomyManager getSearcherTaxonomyManager(
+ Directory indexDir, Directory taxoDir, SearcherFactory searcherFactory) throws IOException {
+ if (random().nextBoolean()) {
+ return new SearcherTaxonomyManager(indexDir, taxoDir, searcherFactory);
+ } else {
+ IndexReader reader = DirectoryReader.open(indexDir);
+ DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
+ return new SearcherTaxonomyManager(reader, taxoReader, searcherFactory);
+ }
+ }
}
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java
index bb34dee..47e42dc 100644
--- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java
+++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java
@@ -545,6 +545,7 @@ public class TestDirectoryTaxonomyReader extends FacetTestCase {
Directory dir = newDirectory();
DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(dir);
int numCategories = atLeast(10);
+ int numA = 0, numB = 0;
Random random = random();
// add the two categories for which we'll also add children (so asserts are simpler)
taxoWriter.addCategory(new FacetLabel("a"));
@@ -552,8 +553,10 @@ public class TestDirectoryTaxonomyReader extends FacetTestCase {
for (int i = 0; i < numCategories; i++) {
if (random.nextBoolean()) {
taxoWriter.addCategory(new FacetLabel("a", Integer.toString(i)));
+ ++numA;
} else {
taxoWriter.addCategory(new FacetLabel("b", Integer.toString(i)));
+ ++numB;
}
}
// add category with no children
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java
index 1e4553c..9b7baae 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestAllGroupHeadsCollector.java
@@ -228,6 +228,7 @@ public class TestAllGroupHeadsCollector extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter w =
new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())));
+ DocValuesType valueType = DocValuesType.SORTED;
Document doc = new Document();
Document docNoGroup = new Document();
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index 3cbf735..df53dff 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -1371,6 +1371,7 @@ public class TestGrouping extends LuceneTestCase {
final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<>();
List<FirstPassGroupingCollector<?>> firstPassGroupingCollectors = new ArrayList<>();
FirstPassGroupingCollector<?> firstPassCollector = null;
+ boolean shardsCanUseIDV = canUseIDV;
String groupField = "group";
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java
index 96384ab..eb903fc 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java
@@ -36,10 +36,11 @@ public class TokenGroup {
private int matchEndOffset;
private OffsetAttribute offsetAtt;
+ private CharTermAttribute termAtt;
public TokenGroup(TokenStream tokenStream) {
offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
- tokenStream.addAttribute(CharTermAttribute.class);
+ termAtt = tokenStream.addAttribute(CharTermAttribute.class);
}
void addToken(float score) {
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java
index 2eebfd9..f70ce3e 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterMTQ.java
@@ -911,7 +911,7 @@ public class TestUnifiedHighlighterMTQ extends LuceneTestCase {
BooleanQuery query = queryBuilder.build();
TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
try {
- highlighter.highlight("body", query, topDocs, 2);
+ String snippets[] = highlighter.highlight("body", query, topDocs, 2);
// don't even care what the results are; just want to test exception behavior
if (fieldType == UHTestHelper.reanalysisType) {
fail("Expecting EXPECTED IOException");
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index 6180587..f6a5e76 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -1665,7 +1665,7 @@ public class TestJoinUtil extends LuceneTestCase {
multipleValuesPerDocument
? Math.min(2 + random.nextInt(10), context.randomUniqueValues.length)
: 1;
- docs[i] = new RandomDoc(id, numberOfLinkValues);
+ docs[i] = new RandomDoc(id, numberOfLinkValues, value, from);
if (globalOrdinalJoin) {
document.add(newStringField("type", from ? "from" : "to", Field.Store.NO));
}
@@ -2061,10 +2061,14 @@ public class TestJoinUtil extends LuceneTestCase {
final String id;
final List<String> linkValues;
+ final String value;
+ final boolean from;
- private RandomDoc(String id, int numberOfLinkValues) {
+ private RandomDoc(String id, int numberOfLinkValues, String value, boolean from) {
this.id = id;
+ this.from = from;
linkValues = new ArrayList<>(numberOfLinkValues);
+ this.value = value;
}
}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java
index b21e9c5..b031437 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/AnalysisPanelProvider.java
@@ -407,5 +407,7 @@ public final class AnalysisPanelProvider implements AnalysisTabOperator {
AnalysisPanelProvider.this.executeAnalysis();
}
}
+
+ void executeAnalysisStepByStep(ActionEvent e) {}
}
}
diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
index 2a463f8..2389613 100644
--- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
+++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/menubar/OptimizeIndexDialogFactory.java
@@ -26,6 +26,7 @@ import java.awt.Insets;
import java.awt.Window;
import java.awt.event.ActionEvent;
import java.io.IOException;
+import java.lang.invoke.MethodHandles;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import javax.swing.BorderFactory;
@@ -41,6 +42,7 @@ import javax.swing.JSpinner;
import javax.swing.JTextArea;
import javax.swing.SpinnerNumberModel;
import javax.swing.SwingWorker;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.luke.app.IndexHandler;
import org.apache.lucene.luke.app.IndexObserver;
import org.apache.lucene.luke.app.LukeState;
@@ -54,11 +56,14 @@ import org.apache.lucene.luke.app.desktop.util.StyleConstants;
import org.apache.lucene.luke.app.desktop.util.TextAreaPrintStream;
import org.apache.lucene.luke.models.tools.IndexTools;
import org.apache.lucene.luke.models.tools.IndexToolsFactory;
+import org.apache.lucene.luke.util.LoggerFactory;
import org.apache.lucene.util.NamedThreadFactory;
/** Factory of optimize index dialog */
public final class OptimizeIndexDialogFactory implements DialogOpener.DialogFactory {
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
private static OptimizeIndexDialogFactory instance;
private final Preferences prefs;
diff --git a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
index ddddbef..cf08f1f 100644
--- a/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
+++ b/lucene/luke/src/test/org/apache/lucene/luke/models/documents/TestDocumentsImpl.java
@@ -150,7 +150,7 @@ public class TestDocumentsImpl extends DocumentsTestBase {
assertEquals("adventures", term.text());
while (documents.nextTerm().isPresent()) {
- documents.getDocFreq().orElseThrow(IllegalStateException::new);
+ Integer freq = documents.getDocFreq().orElseThrow(IllegalStateException::new);
}
}
@@ -208,16 +208,16 @@ public class TestDocumentsImpl extends DocumentsTestBase {
@Test
public void testNextTermDoc_unPositioned() {
DocumentsImpl documents = new DocumentsImpl(reader);
- documents.firstTerm("title").orElseThrow(IllegalStateException::new);
+ Term term = documents.firstTerm("title").orElseThrow(IllegalStateException::new);
assertFalse(documents.nextTermDoc().isPresent());
}
@Test
public void testTermPositions() {
DocumentsImpl documents = new DocumentsImpl(reader);
- documents.firstTerm("author").orElseThrow(IllegalStateException::new);
- documents.seekTerm("carroll").orElseThrow(IllegalStateException::new);
- documents.firstTermDoc().orElseThrow(IllegalStateException::new);
+ Term term = documents.firstTerm("author").orElseThrow(IllegalStateException::new);
+ term = documents.seekTerm("carroll").orElseThrow(IllegalStateException::new);
+ int docid = documents.firstTermDoc().orElseThrow(IllegalStateException::new);
List<TermPosting> postings = documents.getTermPositions();
assertEquals(1, postings.size());
assertEquals(1, postings.get(0).getPosition());
@@ -228,21 +228,21 @@ public class TestDocumentsImpl extends DocumentsTestBase {
@Test
public void testTermPositions_unPositioned() {
DocumentsImpl documents = new DocumentsImpl(reader);
- documents.firstTerm("author").orElseThrow(IllegalStateException::new);
+ Term term = documents.firstTerm("author").orElseThrow(IllegalStateException::new);
assertEquals(0, documents.getTermPositions().size());
}
@Test
public void testTermPositions_noPositions() {
DocumentsImpl documents = new DocumentsImpl(reader);
- documents.firstTerm("title").orElseThrow(IllegalStateException::new);
- documents.firstTermDoc().orElseThrow(IllegalStateException::new);
+ Term term = documents.firstTerm("title").orElseThrow(IllegalStateException::new);
+ int docid = documents.firstTermDoc().orElseThrow(IllegalStateException::new);
assertEquals(0, documents.getTermPositions().size());
}
@Test(expected = AlreadyClosedException.class)
public void testClose() throws Exception {
- new DocumentsImpl(reader);
+ DocumentsImpl documents = new DocumentsImpl(reader);
reader.close();
IndexUtils.getFieldNames(reader);
}
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 9c3aad2..6dcf8d0 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -1440,8 +1440,11 @@ public class MemoryIndex {
@Override
public int size() {
- return Math.toIntExact(
- fields.entrySet().stream().filter(e -> e.getValue().numTokens > 0).count());
+ int size = 0;
+ for (String fieldName : this) {
+ size++;
+ }
+ return size;
}
}
@@ -1570,6 +1573,7 @@ public class MemoryIndex {
private boolean hasNext;
private int doc = -1;
private int freq;
+ private int pos;
private int startOffset;
private int endOffset;
private int payloadIndex;
@@ -1596,6 +1600,7 @@ public class MemoryIndex {
@Override
public int nextDoc() {
+ pos = -1;
if (hasNext) {
hasNext = false;
return doc = 0;
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java
index c66e4a9..cea0a9d 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstDirectory.java
@@ -67,9 +67,12 @@ import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ByteBlockPool;
+import org.apache.lucene.util.ByteBlockPool.Allocator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.RecyclingByteBlockAllocator;
import org.apache.lucene.util.TestUtil;
/**
@@ -352,6 +355,14 @@ public class TestMemoryIndexAgainstDirectory extends BaseTokenStreamTestCase {
reader.close();
}
+ private Allocator randomByteBlockAllocator() {
+ if (random().nextBoolean()) {
+ return new RecyclingByteBlockAllocator();
+ } else {
+ return new ByteBlockPool.DirectAllocator();
+ }
+ }
+
private MemoryIndex randomMemoryIndex() {
return new MemoryIndex(
random().nextBoolean(), random().nextBoolean(), random().nextInt(50) * 1024 * 1024);
diff --git a/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java b/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java
index 55c1675..540ce25 100644
--- a/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java
+++ b/lucene/misc/src/test/org/apache/lucene/misc/util/fst/TestFSTsMisc.java
@@ -67,6 +67,7 @@ public class TestFSTsMisc extends LuceneTestCase {
for (int inputMode = 0; inputMode < 2; inputMode++) {
final int numWords = random.nextInt(maxNumWords + 1);
Set<IntsRef> termsSet = new HashSet<>();
+ IntsRef[] terms = new IntsRef[numWords];
while (termsSet.size() < numWords) {
final String term = FSTTester.getRandomString(random);
termsSet.add(FSTTester.toIntsRef(term, inputMode));
diff --git a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java
index ce82237..d1b1f9d 100644
--- a/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java
+++ b/lucene/monitor/src/test/org/apache/lucene/monitor/TestBooleanTermExtractor.java
@@ -31,6 +31,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestBooleanTermExtractor extends LuceneTestCase {
private static final QueryAnalyzer treeBuilder = new QueryAnalyzer();
+ private static final TermWeightor WEIGHTOR = TermWeightor.DEFAULT;
private Set<Term> collectTerms(Query query) {
Set<Term> terms = new HashSet<>();
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
index 01a3723..8d80677 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/EnumFieldSource.java
@@ -102,6 +102,8 @@ public class EnumFieldSource extends FieldCacheSource {
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
return new IntDocValues(this) {
+ final MutableValueInt val = new MutableValueInt();
+
int lastDocID;
private int getValueForDoc(int doc) throws IOException {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java
index 239ec6a..2ffa781 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java
@@ -121,7 +121,7 @@ public final class IntervalQuery extends Query {
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)
throws IOException {
- return new IntervalWeight(this, boost);
+ return new IntervalWeight(this, boost, scoreMode);
}
@Override
@@ -147,10 +147,12 @@ public final class IntervalQuery extends Query {
private class IntervalWeight extends Weight {
+ final ScoreMode scoreMode;
final float boost;
- public IntervalWeight(Query query, float boost) {
+ public IntervalWeight(Query query, float boost, ScoreMode scoreMode) {
super(query);
+ this.scoreMode = scoreMode;
this.boost = boost;
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
index 283a1b6..dba7840 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
@@ -659,12 +659,12 @@ public final class MoreLikeThis {
if (queue.size() < limit) {
// there is still space in the queue
- queue.add(new ScoreTerm(word, fieldName, score));
+ queue.add(new ScoreTerm(word, fieldName, score, idf, docFreq, tf));
} else {
ScoreTerm term = queue.top();
// update the smallest in the queue in place and update the queue.
if (term.score < score) {
- term.update(word, fieldName, score);
+ term.update(word, fieldName, score, idf, docFreq, tf);
queue.updateTop();
}
}
@@ -935,17 +935,26 @@ public final class MoreLikeThis {
String word;
String topField;
float score;
+ float idf;
+ int docFreq;
+ int tf;
- ScoreTerm(String word, String topField, float score) {
+ ScoreTerm(String word, String topField, float score, float idf, int docFreq, int tf) {
this.word = word;
this.topField = topField;
this.score = score;
+ this.idf = idf;
+ this.docFreq = docFreq;
+ this.tf = tf;
}
- void update(String word, String topField, float score) {
+ void update(String word, String topField, float score, float idf, int docFreq, int tf) {
this.word = word;
this.topField = topField;
this.score = score;
+ this.idf = idf;
+ this.docFreq = docFreq;
+ this.tf = tf;
}
}
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
index a0f58ae..dd03bac 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
@@ -81,7 +81,7 @@ import org.apache.lucene.queryparser.charstream.FastCharStream;
* the same syntax as this class, but is more modular,
* enabling substantial customization to how a query is created.
*/
-@SuppressWarnings("unused") public class QueryParser extends QueryParserBase implements QueryParserConstants {
+public class QueryParser extends QueryParserBase implements QueryParserConstants {
/** The default operator for parsing queries.
* Use {@link QueryParserBase#setDefaultOperator} to change it.
*/
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
index ca89bf5..10d0950 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserTokenManager.java
@@ -18,7 +18,7 @@ package org.apache.lucene.queryparser.classic;
/** Token Manager. */
-@SuppressWarnings("unused") public class QueryParserTokenManager implements QueryParserConstants {
+public class QueryParserTokenManager implements QueryParserConstants {
/** Debug output. */
// (debugStream omitted).
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
index 326da0b..3462a06 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
@@ -143,7 +143,10 @@ public class NLS {
ResourceBundle resourceBundle =
ResourceBundle.getBundle(clazz.getName(), Locale.getDefault());
if (resourceBundle != null) {
- resourceBundle.getObject(key);
+ Object obj = resourceBundle.getObject(key);
+ // if (obj == null)
+ // System.err.println("WARN: Message with key:" + key + " and locale: "
+ // + Locale.getDefault() + " not found.");
}
} catch (MissingResourceException e) {
// System.err.println("WARN: Message with key:" + key + " and locale: "
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
index 65f22a0..a782e8b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
@@ -1,11 +1,11 @@
/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 7.0 */
/* JavaCCOptions:KEEP_LINE_COLUMN=true */
- package org.apache.lucene.queryparser.flexible.standard.parser;
-
- import org.apache.lucene.queryparser.flexible.messages.*;
- import org.apache.lucene.queryparser.flexible.core.*;
- import org.apache.lucene.queryparser.flexible.core.messages.*;
-
+ package org.apache.lucene.queryparser.flexible.standard.parser;
+
+ import org.apache.lucene.queryparser.flexible.messages.*;
+ import org.apache.lucene.queryparser.flexible.core.*;
+ import org.apache.lucene.queryparser.flexible.core.messages.*;
+
/**
* This exception is thrown when parse errors are encountered.
@@ -36,16 +36,16 @@ public class ParseException extends QueryNodeParseException {
* a new object of this type with the fields "currentToken",
* "expectedTokenSequences", and "tokenImage" set.
*/
- public ParseException(Token currentTokenVal,
- int[][] expectedTokenSequencesVal, String[] tokenImageVal)
- {
- super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(
- currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));
- this.currentToken = currentTokenVal;
- this.expectedTokenSequences = expectedTokenSequencesVal;
- this.tokenImage = tokenImageVal;
- }
-
+ public ParseException(Token currentTokenVal,
+ int[][] expectedTokenSequencesVal, String[] tokenImageVal)
+ {
+ super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, initialise(
+ currentTokenVal, expectedTokenSequencesVal, tokenImageVal)));
+ this.currentToken = currentTokenVal;
+ this.expectedTokenSequences = expectedTokenSequencesVal;
+ this.tokenImage = tokenImageVal;
+ }
+
/**
* The following constructors are for use by you for whatever
@@ -57,18 +57,18 @@ public class ParseException extends QueryNodeParseException {
* these constructors.
*/
- public ParseException()
- {
- super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, "Error"));
- }
-
+ public ParseException()
+ {
+ super(new MessageImpl(QueryParserMessages.INVALID_SYNTAX, "Error"));
+ }
+
/** Constructor with message. */
- public ParseException(Message message)
- {
- super(message);
- }
-
+ public ParseException(Message message)
+ {
+ super(message);
+ }
+
/**
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
index d509a0d..61a3238 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
@@ -50,7 +50,7 @@ import static org.apache.lucene.queryparser.flexible.standard.parser.EscapeQuery
/**
* Parser for the standard Lucene syntax
*/
-@SuppressWarnings("unused") public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserConstants {
+public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserConstants {
public StandardSyntaxParser() {
this(new FastCharStream(Reader.nullReader()));
}
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
index 3dc6507..b0ef7fd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParserTokenManager.java
@@ -46,7 +46,7 @@ package org.apache.lucene.queryparser.flexible.standard.parser;
/** Token Manager. */
-@SuppressWarnings("unused") public class StandardSyntaxParserTokenManager implements StandardSyntaxParserConstants {
+public class StandardSyntaxParserTokenManager implements StandardSyntaxParserConstants {
/** Debug output. */
// (debugStream omitted).
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
index c90c362..aac680f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
@@ -46,7 +46,7 @@ import org.apache.lucene.queryparser.charstream.FastCharStream;
* to two terms may appear between a and b. </p>
*/
-@SuppressWarnings("unused") public class QueryParser implements QueryParserConstants {
+public class QueryParser implements QueryParserConstants {
static final int MINIMUM_PREFIX_LENGTH = 3;
static final int MINIMUM_CHARS_IN_TRUNC = 3;
static final String TRUNCATION_ERROR_MESSAGE = "Too unrestrictive truncation: ";
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
index f8fef60..d12f207 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParserTokenManager.java
@@ -19,7 +19,7 @@ package org.apache.lucene.queryparser.surround.parser;
/** Token Manager. */
-@SuppressWarnings("unused") public class QueryParserTokenManager implements QueryParserConstants {
+public class QueryParserTokenManager implements QueryParserConstants {
/** Debug output. */
// (debugStream omitted).
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
index f11477c..9db8d04 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/CoreParser.java
@@ -34,7 +34,7 @@ import org.xml.sax.ErrorHandler;
import org.xml.sax.SAXException;
/** Assembles a QueryBuilder which uses only core Lucene Query objects */
-public class CoreParser implements SpanQueryBuilder {
+public class CoreParser implements QueryBuilder, SpanQueryBuilder {
protected String defaultField;
protected Analyzer analyzer;
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
index cbb4aff..d362340 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
@@ -63,6 +63,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
// verify parsing of query using a stopping analyzer
private void assertStopQueryIsMatchNoDocsQuery(String qtxt) throws Exception {
String[] fields = {"b", "t"};
+ Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
TestQPHelper.QPTestAnalyzer a = new TestQPHelper.QPTestAnalyzer();
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index dca7f39..787c336 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -70,6 +70,7 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.WildcardQuery;
@@ -1254,6 +1255,17 @@ public class TestQPHelper extends LuceneTestCase {
}
}
+ private void assertHits(int expected, String query, IndexSearcher is)
+ throws IOException, QueryNodeException {
+ StandardQueryParser qp = new StandardQueryParser();
+ qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+ qp.setLocale(Locale.ENGLISH);
+
+ Query q = qp.parse(query, "date");
+ ScoreDoc[] hits = is.search(q, 1000).scoreDocs;
+ assertEquals(expected, hits.length);
+ }
+
@Override
public void tearDown() throws Exception {
IndexSearcher.setMaxClauseCount(originalMaxClauses);
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java
index 2e616a8..29088a8 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaNode.java
@@ -271,6 +271,7 @@ public abstract class ReplicaNode extends Node {
}
lastPrimaryGen = job.getCopyState().primaryGen;
+ byte[] infosBytes = job.getCopyState().infosBytes;
SegmentInfos syncInfos =
SegmentInfos.readCommit(
@@ -436,6 +437,7 @@ public abstract class ReplicaNode extends Node {
job.finish();
// Turn byte[] back to SegmentInfos:
+ byte[] infosBytes = copyState.infosBytes;
SegmentInfos infos =
SegmentInfos.readCommit(dir, toIndexInput(copyState.infosBytes), copyState.gen);
assert infos.getVersion() == copyState.version;
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java
index d9ae94b..4e80e30 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/TestIndexAndTaxonomyReplicationClient.java
@@ -419,12 +419,13 @@ public class TestIndexAndTaxonomyReplicationClient extends ReplicatorTestCase {
// verify taxonomy index is fully consistent (since we only add one
// category to all documents, there's nothing much more to validate.
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+ CheckIndex.Status indexStatus = null;
try (CheckIndex checker = new CheckIndex(handlerTaxoDir.getDelegate())) {
checker.setFailFast(true);
checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false);
try {
- checker.checkIndex(null);
+ indexStatus = checker.checkIndex(null);
} catch (IOException | RuntimeException ioe) {
// ok: we fallback below
}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java
index 30aaba4..51f1fc2 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimplePrimaryNode.java
@@ -196,6 +196,9 @@ class SimplePrimaryNode extends PrimaryNode {
warmingSegments.add(preCopy);
try {
+
+ Set<String> fileNames = files.keySet();
+
// Ask all currently known replicas to pre-copy this newly merged segment's files:
for (int replicaTCPPort : replicaTCPPorts) {
try {
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java
index d32dcd6..17f6fd1 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/SimpleReplicaNode.java
@@ -332,7 +332,7 @@ class SimpleReplicaNode extends ReplicaNode {
Map<String, FileMetaData> files = SimpleServer.readFilesMetaData(in);
message("done reading files to copy files=" + files.keySet());
AtomicBoolean finished = new AtomicBoolean();
- launchPreCopyMerge(finished, newPrimaryGen, files);
+ CopyJob job = launchPreCopyMerge(finished, newPrimaryGen, files);
message("done launching copy job files=" + files.keySet());
// Silly keep alive mechanism, else if e.g. we (replica node) crash, the primary
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java
index d02bd41..1c42d07 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestNRTReplication.java
@@ -125,6 +125,7 @@ public class TestNRTReplication extends LuceneTestCase {
long initCommitVersion = -1;
long initInfosVersion = -1;
Pattern logTimeStart = Pattern.compile("^[0-9\\.]+s .*");
+ boolean sawExistingSegmentsFile = false;
while (true) {
String l = r.readLine();
@@ -158,6 +159,7 @@ public class TestNRTReplication extends LuceneTestCase {
} else if (l.startsWith("NODE STARTED")) {
break;
} else if (l.contains("replica cannot start: existing segments file=")) {
+ sawExistingSegmentsFile = true;
}
}
diff --git a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestStressNRTReplication.java b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestStressNRTReplication.java
index 09aaf8f..fd05821 100644
--- a/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestStressNRTReplication.java
+++ b/lucene/replicator/src/test/org/apache/lucene/replicator/nrt/TestStressNRTReplication.java
@@ -44,6 +44,9 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Pattern;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LineFileDocs;
@@ -441,6 +444,7 @@ public class TestStressNRTReplication extends LuceneTestCase {
return;
}
+ int id = replicaToPromote.id;
message("top: now startPrimary " + replicaToPromote);
startPrimary(replicaToPromote.id);
}
@@ -1001,6 +1005,9 @@ public class TestStressNRTReplication extends LuceneTestCase {
@Override
public void run() {
+ // Maps version to number of hits for silly 'the' TermQuery:
+ Query theQuery = new TermQuery(new Term("body", "the"));
+
// Persists connections
Map<Integer, Connection> connections = new HashMap<>();
@@ -1214,6 +1221,8 @@ public class TestStressNRTReplication extends LuceneTestCase {
message("top: indexer: updatePct=" + updatePct + " sleepChance=" + sleepChance);
+ long lastTransLogLoc = transLog.getNextLocation();
+
NodeProcess curPrimary = null;
Connection c = null;
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java
index fa1f22b..be8688e 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonQuery.java
@@ -429,7 +429,11 @@ public class TermAutomatonQuery extends Query implements Accountable {
if (any) {
return new TermAutomatonScorer(
- this, enums, anyTermID, new LeafSimScorer(stats, context.reader(), field, true));
+ this,
+ enums,
+ anyTermID,
+ idToTerm,
+ new LeafSimScorer(stats, context.reader(), field, true));
} else {
return null;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java
index 17c8e58..8567ce0 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TermAutomatonScorer.java
@@ -17,12 +17,14 @@
package org.apache.lucene.sandbox.search;
import java.io.IOException;
+import java.util.Map;
import org.apache.lucene.sandbox.search.TermAutomatonQuery.EnumAndScorer;
import org.apache.lucene.sandbox.search.TermAutomatonQuery.TermAutomatonWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.LeafSimScorer;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.automaton.Automaton;
@@ -30,10 +32,12 @@ import org.apache.lucene.util.automaton.RunAutomaton;
// TODO: add two-phase and needsScores support. maybe use conjunctionDISI internally?
class TermAutomatonScorer extends Scorer {
+ private final EnumAndScorer[] subs;
private final EnumAndScorer[] subsOnDoc;
private final PriorityQueue<EnumAndScorer> docIDQueue;
private final PriorityQueue<EnumAndScorer> posQueue;
private final RunAutomaton runAutomaton;
+ private final Map<Integer, BytesRef> idToTerm;
// We reuse this array to check for matches starting from an initial
// position; we increase posShift every time we move to a new possible
@@ -54,12 +58,18 @@ class TermAutomatonScorer extends Scorer {
private int freq;
public TermAutomatonScorer(
- TermAutomatonWeight weight, EnumAndScorer[] subs, int anyTermID, LeafSimScorer docScorer)
+ TermAutomatonWeight weight,
+ EnumAndScorer[] subs,
+ int anyTermID,
+ Map<Integer, BytesRef> idToTerm,
+ LeafSimScorer docScorer)
throws IOException {
super(weight);
// System.out.println(" automaton:\n" + weight.automaton.toDot());
this.runAutomaton = new TermRunAutomaton(weight.automaton, subs.length);
this.docScorer = docScorer;
+ this.idToTerm = idToTerm;
+ this.subs = subs;
this.docIDQueue = new DocIDQueue(subs.length);
this.posQueue = new PositionQueue(subs.length);
this.anyTermID = anyTermID;
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TokenStreamToTermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TokenStreamToTermAutomatonQuery.java
index 365b85b..1be61b9 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TokenStreamToTermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/search/TokenStreamToTermAutomatonQuery.java
@@ -65,6 +65,7 @@ public class TokenStreamToTermAutomatonQuery {
TermAutomatonQuery query = new TermAutomatonQuery(field);
int pos = -1;
+ int lastPos = 0;
int maxOffset = 0;
int maxPos = -1;
int state = -1;
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java
index c6c805a..683f329 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -141,6 +141,8 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
}
ids =
new IDSource() {
+ final int radix =
+ TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
final String zeroPad =
String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random(), 5, 20) + "d", 0);
int upto;
@@ -161,6 +163,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
new IDSource() {
final int radix =
TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
+ int upto;
@Override
public String next() {
@@ -177,6 +180,8 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
new IDSource() {
final int radix =
TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
+ final String zeroPad = String.format(Locale.ROOT, "%015d", 0);
+ int upto;
@Override
public String next() {
@@ -566,6 +571,7 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
payload.length = 8;
IDVersionPostingsFormat.longToBytes(17, payload);
ts.setValue("foo", payload);
+ Field field = new Field("id", ts, ft);
doc.add(new Field("id", ts, ft));
expectThrows(
IllegalArgumentException.class,
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java
index 7ca592e..3c2c83b 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/search/TestTermAutomatonQuery.java
@@ -775,7 +775,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
public void testRewriteNoMatch() throws Exception {
TermAutomatonQuery q = new TermAutomatonQuery("field");
- q.createState(); // initState
+ int initState = q.createState();
q.finish();
Directory dir = newDirectory();
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java
index c1facb8..72ee20b 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTree.java
@@ -446,6 +446,7 @@ public class DateRangePrefixTree extends NumberRangePrefixTree {
private void appendPadded(StringBuilder builder, int integer, short positions) {
assert integer >= 0 && positions >= 1 && positions <= 4;
+ int preBuilderLen = builder.length();
int intStrLen;
if (integer > 999) {
intStrLen = 4;
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java
index 45586ec..3a3e69a 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/SpatialTestCase.java
@@ -161,6 +161,11 @@ public abstract class SpatialTestCase extends LuceneTestCase {
}
}
+ private double randomGaussianMinMeanMax(double min, double mean, double max) {
+ assert mean > min;
+ return randomGaussianMeanMax(mean - min, max - min) + min;
+ }
+
/**
* Within one standard deviation (68% of the time) the result is "close" to mean. By "close": when
* greater than mean, it's the lesser of 2*mean or half way to max, when lesser than mean, it's
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
index b9d3384..0df4883 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/bbox/TestBBoxStrategy.java
@@ -170,6 +170,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
final Rectangle indexedShape = ctx.getShapeFactory().rect(180, 180, -10, 10);
final Rectangle queryShape = ctx.getShapeFactory().rect(-180, -180, -20, 20);
final SpatialOperation operation = SpatialOperation.IsWithin;
+ final boolean match = true; // yes it is within
// the rest is super.testOperation without leading assert:
@@ -178,7 +179,7 @@ public class TestBBoxStrategy extends RandomSpatialOpStrategyTestCase {
Query query = strategy.makeQuery(new SpatialArgs(operation, queryShape));
SearchResults got = executeQuery(query, 1);
assert got.numFound <= 1 : "unclean test env";
- if (got.numFound != 1) fail(operation + " I:" + indexedShape + " Q:" + queryShape);
+ if ((got.numFound == 1) != match) fail(operation + " I:" + indexedShape + " Q:" + queryShape);
deleteAll(); // clean up after ourselves
}
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
index 6fae979..1556370 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestRandomSpatialOpFuzzyPrefixTree.java
@@ -292,6 +292,7 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
Map<String, Shape> indexedShapes = new LinkedHashMap<>();
Map<String, Shape> indexedShapesGS = new LinkedHashMap<>(); // grid snapped
final int numIndexedShapes = randomIntBetween(1, 6);
+ boolean indexedAtLeastOneShapePair = false;
final boolean pointsOnly = ((PrefixTreeStrategy) strategy).isPointsOnly();
for (int i = 0; i < numIndexedShapes; i++) {
String id = "" + i;
@@ -304,6 +305,7 @@ public class TestRandomSpatialOpFuzzyPrefixTree extends StrategyTestCase {
} else if (R <= 4) { // 3 in 12
// comprised of more than one shape
indexedShape = randomShapePairRect(biasContains);
+ indexedAtLeastOneShapePair = true;
} else {
indexedShape = randomRectangle(); // just one rect
}
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
index 5f0d8b3..306c9f8 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/TestTermQueryPrefixGridStrategy.java
@@ -24,6 +24,7 @@ import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.spatial.SpatialTestCase;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.apache.lucene.spatial.query.SpatialArgsParser;
import org.junit.Test;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.shape.Shape;
@@ -50,7 +51,7 @@ public class TestTermQueryPrefixGridStrategy extends SpatialTestCase {
addDocumentsAndCommit(Arrays.asList(losAngeles));
// This won't work with simple spatial context...
- // SpatialArgsParser spatialArgsParser = new SpatialArgsParser();
+ SpatialArgsParser spatialArgsParser = new SpatialArgsParser();
// TODO... use a non polygon query
// SpatialArgs spatialArgs = spatialArgsParser.parse(
// "Intersects(POLYGON((-127.00390625 39.8125,-112.765625 39.98828125,-111.53515625
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java
index edc862c..44bc6b0 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/prefix/tree/TestDateRangePrefixTree.java
@@ -184,6 +184,7 @@ public class TestDateRangePrefixTree extends LuceneTestCase {
private void roundTrip(Calendar calOrig) throws ParseException {
Calendar cal = (Calendar) calOrig.clone();
+ String lastString = null;
while (true) {
String calString;
{
@@ -230,6 +231,7 @@ public class TestDateRangePrefixTree extends LuceneTestCase {
if (e.getMessage().equals("Calendar underflow")) return;
throw e;
}
+ lastString = calString;
}
}
diff --git a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java
index 45e7539..6376fc0 100644
--- a/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java
+++ b/lucene/spatial-extras/src/test/org/apache/lucene/spatial/spatial4j/TestGeo3dShapeWGS84ModelRectRelation.java
@@ -112,6 +112,8 @@ public class TestGeo3dShapeWGS84ModelRectRelation extends ShapeRectRelationTestC
16 * RADIANS_PER_DEGREE,
4 * RADIANS_PER_DEGREE,
36 * RADIANS_PER_DEGREE);
+ final GeoPoint pt =
+ new GeoPoint(planetModel, 16 * RADIANS_PER_DEGREE, 23.81626064835212 * RADIANS_PER_DEGREE);
final GeoPoint[] pathPoints =
new GeoPoint[] {
new GeoPoint(
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
index e72b70b..cf46234 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoComplexPolygon.java
@@ -113,10 +113,12 @@ class GeoComplexPolygon extends GeoBasePolygon {
}
if (lastEdge != null) {
lastEdge.next = edge;
+ edge.previous = lastEdge;
}
lastEdge = edge;
lastGeoPoint = thisGeoPoint;
}
+ firstEdge.previous = lastEdge;
lastEdge.next = firstEdge;
shapeStartEdges[edgePointIndex] = firstEdge;
edgePointIndex++;
@@ -918,6 +920,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
public final SidedPlane backingPlane;
public final Plane plane;
public final XYZBounds planeBounds;
+ public Edge previous = null;
public Edge next = null;
public Edge(final PlanetModel pm, final GeoPoint startPoint, final GeoPoint endPoint) {
@@ -1187,6 +1190,8 @@ class GeoComplexPolygon extends GeoBasePolygon {
private abstract static class Tree {
private final Node rootNode;
+ protected static final Edge[] EMPTY_ARRAY = new Edge[0];
+
/**
* Constructor.
*
@@ -1278,6 +1283,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
/** This is the z-tree. */
private static class ZTree extends Tree {
+ public Node rootNode = null;
public ZTree(final List<Edge> allEdges) {
super(allEdges);
@@ -1438,6 +1444,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
/** Count the number of verifiable edge crossings for a full 1/2 a world. */
private class FullLinearCrossingEdgeIterator implements CountingEdgeIterator {
+ private final GeoPoint testPoint;
private final Plane plane;
private final Plane abovePlane;
private final Plane belowPlane;
@@ -1461,6 +1468,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
assert plane.evaluateIsZero(thePointX, thePointY, thePointZ)
: "Check point is not on travel plane";
assert plane.evaluateIsZero(testPoint) : "Test point is not on travel plane";
+ this.testPoint = testPoint;
this.plane = plane;
this.abovePlane = abovePlane;
this.belowPlane = belowPlane;
@@ -1565,6 +1573,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
/** Count the number of verifiable edge crossings for less than 1/2 a world. */
private class SectorLinearCrossingEdgeIterator implements CountingEdgeIterator {
+ private final GeoPoint testPoint;
private final Plane plane;
private final Plane abovePlane;
private final Plane belowPlane;
@@ -1589,6 +1598,7 @@ class GeoComplexPolygon extends GeoBasePolygon {
assert plane.evaluateIsZero(thePointX, thePointY, thePointZ)
: "Check point is not on travel plane";
assert plane.evaluateIsZero(testPoint) : "Test point is not on travel plane";
+ this.testPoint = testPoint;
this.plane = plane;
this.abovePlane = abovePlane;
this.belowPlane = belowPlane;
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeMembershipShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeMembershipShape.java
index d02a149..0ab2efb 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeMembershipShape.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoCompositeMembershipShape.java
@@ -24,8 +24,8 @@ import java.io.InputStream;
*
* @lucene.experimental
*/
-public class GeoCompositeMembershipShape
- extends GeoBaseCompositeMembershipShape<GeoMembershipShape> {
+public class GeoCompositeMembershipShape extends GeoBaseCompositeMembershipShape<GeoMembershipShape>
+ implements GeoMembershipShape {
/** Constructor. */
public GeoCompositeMembershipShape(PlanetModel planetModel) {
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java
index 51d8152..a6361cc 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoDegeneratePath.java
@@ -404,6 +404,10 @@ class GeoDegeneratePath extends GeoBasePath {
public final GeoPoint point;
/** Pertinent cutoff planes from adjoining segments */
public final Membership[] cutoffPlanes;
+ /** Notable points for this segment endpoint */
+ public final GeoPoint[] notablePoints;
+ /** No notable points from the circle itself */
+ public static final GeoPoint[] circlePoints = new GeoPoint[0];
/** Null membership */
public static final Membership[] NO_MEMBERSHIP = new Membership[0];
@@ -415,6 +419,7 @@ class GeoDegeneratePath extends GeoBasePath {
public SegmentEndpoint(final GeoPoint point) {
this.point = point;
this.cutoffPlanes = NO_MEMBERSHIP;
+ this.notablePoints = circlePoints;
}
/**
@@ -428,6 +433,7 @@ class GeoDegeneratePath extends GeoBasePath {
public SegmentEndpoint(final GeoPoint point, final SidedPlane cutoffPlane) {
this.point = point;
this.cutoffPlanes = new Membership[] {new SidedPlane(cutoffPlane)};
+ this.notablePoints = new GeoPoint[] {point};
}
/**
@@ -442,6 +448,17 @@ class GeoDegeneratePath extends GeoBasePath {
this.point = point;
this.cutoffPlanes =
new Membership[] {new SidedPlane(cutoffPlane1), new SidedPlane(cutoffPlane2)};
+ this.notablePoints = new GeoPoint[] {point};
+ }
+
+ /**
+ * Check if point is within this endpoint.
+ *
+ * @param point is the point.
+ * @return true if within.
+ */
+ public boolean isWithin(final Vector point) {
+ return this.point.isIdentical(point.x, point.y, point.z);
}
/**
@@ -474,6 +491,26 @@ class GeoDegeneratePath extends GeoBasePath {
}
/**
+ * Compute nearest path distance.
+ *
+ * @param distanceStyle is the distance style.
+ * @param x is the point x.
+ * @param y is the point y.
+ * @param z is the point z.
+ * @return the distance metric (always value zero), in aggregation form, or POSITIVE_INFINITY if
+ * the point is not within the bounds of the endpoint.
+ */
+ public double nearestPathDistance(
+ final DistanceStyle distanceStyle, final double x, final double y, final double z) {
+ for (final Membership m : cutoffPlanes) {
+ if (!m.isWithin(x, y, z)) {
+ return Double.POSITIVE_INFINITY;
+ }
+ }
+ return distanceStyle.toAggregationForm(0.0);
+ }
+
+ /**
* Compute path center distance.
*
* @param distanceStyle is the distance style.
@@ -636,6 +673,18 @@ class GeoDegeneratePath extends GeoBasePath {
/**
* Check if point is within this segment.
*
+ * @param point is the point.
+ * @return true if within.
+ */
+ public boolean isWithin(final Vector point) {
+ return startCutoffPlane.isWithin(point)
+ && endCutoffPlane.isWithin(point)
+ && normalizedConnectingPlane.evaluateIsZero(point);
+ }
+
+ /**
+ * Check if point is within this segment.
+ *
* @param x is the point x.
* @param y is the point y.
* @param z is the point z.
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java
index dfc0459..9ee092d 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoMembershipShape.java
@@ -21,4 +21,4 @@ package org.apache.lucene.spatial3d.geom;
*
* @lucene.experimental
*/
-public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance {}
+public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance, Membership {}
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
index 29f097c..1b834a8 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoPolygonFactory.java
@@ -2022,6 +2022,8 @@ public class GeoPolygonFactory {
*/
private static class SafePath {
public final GeoPoint lastPoint;
+ public final int lastPointIndex;
+ public final Plane lastPlane;
public final SafePath previous;
/** Create a new safe end point. */
@@ -2031,6 +2033,8 @@ public class GeoPolygonFactory {
final int lastPointIndex,
final Plane lastPlane) {
this.lastPoint = lastPoint;
+ this.lastPointIndex = lastPointIndex;
+ this.lastPlane = lastPlane;
this.previous = previous;
}
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
index 227f2e7..b49588d 100755
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/GeoStandardPath.java
@@ -745,6 +745,14 @@ class GeoStandardPath extends GeoBasePath {
}
}
+ /** Simplest possible implementation of segment endpoint: a single point. */
+ private static class DegenerateSegmentEndpoint extends BaseSegmentEndpoint {
+
+ public DegenerateSegmentEndpoint(final GeoPoint point) {
+ super(point);
+ }
+ }
+
/** Endpoint that's a simple circle. */
private static class CircleSegmentEndpoint extends BaseSegmentEndpoint {
/** A plane describing the circle */
@@ -1096,6 +1104,10 @@ class GeoStandardPath extends GeoBasePath {
public final GeoPoint[] upperConnectingPlanePoints;
/** Notable points for the lower connecting plane */
public final GeoPoint[] lowerConnectingPlanePoints;
+ /** Notable points for the start cutoff plane */
+ public final GeoPoint[] startCutoffPlanePoints;
+ /** Notable points for the end cutoff plane */
+ public final GeoPoint[] endCutoffPlanePoints;
/**
* Construct a path segment.
@@ -1169,6 +1181,8 @@ class GeoStandardPath extends GeoBasePath {
this.LRHC = points[0];
upperConnectingPlanePoints = new GeoPoint[] {ULHC, URHC};
lowerConnectingPlanePoints = new GeoPoint[] {LLHC, LRHC};
+ startCutoffPlanePoints = new GeoPoint[] {ULHC, LLHC};
+ endCutoffPlanePoints = new GeoPoint[] {URHC, LRHC};
}
/**
@@ -1193,6 +1207,19 @@ class GeoStandardPath extends GeoBasePath {
/**
* Check if point is within this segment.
*
+ * @param point is the point.
+ * @return true of within.
+ */
+ public boolean isWithin(final Vector point) {
+ return startCutoffPlane.isWithin(point)
+ && endCutoffPlane.isWithin(point)
+ && upperConnectingPlane.isWithin(point)
+ && lowerConnectingPlane.isWithin(point);
+ }
+
+ /**
+ * Check if point is within this segment.
+ *
* @param x is the point x.
* @param y is the point y.
* @param z is the point z.
diff --git a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java
index 6f458b5..c675de1 100644
--- a/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java
+++ b/lucene/spatial3d/src/java/org/apache/lucene/spatial3d/geom/XYZBounds.java
@@ -45,6 +45,13 @@ public class XYZBounds implements Bounds {
/** Maximum z */
private Double maxZ = null;
+ /** Set to true if no longitude bounds can be stated */
+ private boolean noLongitudeBound = false;
+ /** Set to true if no top latitude bound can be stated */
+ private boolean noTopLatitudeBound = false;
+ /** Set to true if no bottom latitude bound can be stated */
+ private boolean noBottomLatitudeBound = false;
+
/** Construct an empty bounds object */
public XYZBounds() {}
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java
index 6589e66..d4616e1 100644
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/TestGeo3DPoint.java
@@ -1180,12 +1180,12 @@ public class TestGeo3DPoint extends LuceneTestCase {
int iters = atLeast(100);
for (int i = 0; i < iters; i++) {
// Create a polygon that's less than 180 degrees
- makePoly(pm, randomPole, true, true);
+ final Polygon clockWise = makePoly(pm, randomPole, true, true);
}
iters = atLeast(100);
for (int i = 0; i < iters; i++) {
// Create a polygon that's greater than 180 degrees
- makePoly(pm, randomPole, false, true);
+ final Polygon counterClockWise = makePoly(pm, randomPole, false, true);
}
}
@@ -1260,12 +1260,12 @@ public class TestGeo3DPoint extends LuceneTestCase {
// the polygon, so we're going to use Geo3D to help us select those given the points we just
// made.
+ final int holeCount = createHoles ? TestUtil.nextInt(random(), 0, 2) : 0;
+
final List<Polygon> holeList = new ArrayList<>();
/* Hole logic is broken and needs rethinking
- final int holeCount = createHoles ? TestUtil.nextInt(random(), 0, 2) : 0;
-
// Create the geo3d polygon, so we can test out our poles.
final GeoPolygon poly;
try {
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java
index 5cb74fc..749c092 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoBBox.java
@@ -31,6 +31,7 @@ public class TestGeoBBox {
@Test
public void testBBoxDegenerate() {
GeoBBox box;
+ GeoConvexPolygon cp;
int relationship;
List<GeoPoint> points = new ArrayList<GeoPoint>();
points.add(
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java
index 1ab6261..d3e354d 100644
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoExactCircle.java
@@ -146,16 +146,18 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator {
public void exactCircleLargeTest() {
boolean success = true;
try {
- GeoCircleFactory.makeExactGeoCircle(
- new PlanetModel(0.99, 1.05), 0.25 * Math.PI, 0, 0.35 * Math.PI, 1e-12);
+ GeoCircle circle =
+ GeoCircleFactory.makeExactGeoCircle(
+ new PlanetModel(0.99, 1.05), 0.25 * Math.PI, 0, 0.35 * Math.PI, 1e-12);
} catch (IllegalArgumentException e) {
success = false;
}
assertTrue(success);
success = false;
try {
- GeoCircleFactory.makeExactGeoCircle(
- PlanetModel.WGS84, 0.25 * Math.PI, 0, 0.9996 * Math.PI, 1e-12);
+ GeoCircle circle =
+ GeoCircleFactory.makeExactGeoCircle(
+ PlanetModel.WGS84, 0.25 * Math.PI, 0, 0.9996 * Math.PI, 1e-12);
} catch (IllegalArgumentException e) {
success = true;
}
@@ -166,8 +168,13 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator {
public void testExactCircleDoesNotFit() {
boolean exception = false;
try {
- GeoCircleFactory.makeExactGeoCircle(
- PlanetModel.WGS84, 1.5633796542562415, -1.0387149580695152, 3.1409865861032844, 1e-12);
+ GeoCircle circle =
+ GeoCircleFactory.makeExactGeoCircle(
+ PlanetModel.WGS84,
+ 1.5633796542562415,
+ -1.0387149580695152,
+ 3.1409865861032844,
+ 1e-12);
} catch (IllegalArgumentException e) {
exception = true;
}
@@ -308,8 +315,9 @@ public class TestGeoExactCircle extends RandomGeo3dShapeGenerator {
PlanetModel planetModel = new PlanetModel(1.6304230055804751, 1.0199671157571204);
boolean fail = false;
try {
- GeoCircleFactory.makeExactGeoCircle(
- planetModel, 0.8853814403571284, 0.9784990176851283, 0.9071033527030907, 1e-11);
+ GeoCircle circle =
+ GeoCircleFactory.makeExactGeoCircle(
+ planetModel, 0.8853814403571284, 0.9784990176851283, 0.9071033527030907, 1e-11);
} catch (IllegalArgumentException e) {
fail = true;
}
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java
index 59c1242..262a622 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPath.java
@@ -130,6 +130,9 @@ public class TestGeoPath extends LuceneTestCase {
GeoStandardPath p;
GeoStandardPath c;
GeoPoint point;
+ GeoPoint pointApprox;
+ int relationship;
+ GeoArea area;
PlanetModel planetModel;
planetModel = new PlanetModel(1.151145876105594, 0.8488541238944061);
@@ -138,14 +141,16 @@ public class TestGeoPath extends LuceneTestCase {
c.addPoint(0.27828548161836364, 0.6785795524104564);
c.done();
point = new GeoPoint(planetModel, -0.49298555067758226, 0.9892440995026406);
- GeoAreaFactory.makeGeoArea(
- planetModel,
- 0.49937141144985997,
- 0.5161765426256085,
- 0.3337218719537796,
- 0.8544419570901649,
- -0.6347692823688085,
- 0.3069696588119369);
+ pointApprox = new GeoPoint(0.5110940362119821, 0.7774603209946239, -0.49984312299556544);
+ area =
+ GeoAreaFactory.makeGeoArea(
+ planetModel,
+ 0.49937141144985997,
+ 0.5161765426256085,
+ 0.3337218719537796,
+ 0.8544419570901649,
+ -0.6347692823688085,
+ 0.3069696588119369);
assertTrue(!c.isWithin(point));
// Start by testing the basic kinds of relationship, increasing in order of difficulty.
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
index 4fdfe87..9516491 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestGeoPolygon.java
@@ -465,6 +465,7 @@ public class TestGeoPolygon extends LuceneTestCase {
@Test
public void testPolygonBoundsCase1() {
GeoPolygon c;
+ LatLonBounds b;
List<GeoPoint> points;
XYZBounds xyzb;
GeoPoint point1;
@@ -607,6 +608,10 @@ public class TestGeoPolygon extends LuceneTestCase {
c.addShape(new GeoConcavePolygon(pm, points2, p2bits, false));
// System.out.println(zScaling);
+ GeoPoint point = new GeoPoint(pm, -0.9825762558001477, 2.4832136904725273);
+ GeoPoint quantizedPoint =
+ new GeoPoint(-0.4505446160475436, 0.34850109186970535, -0.8539966368663765);
+
GeoArea xyzSolid =
GeoAreaFactory.makeGeoArea(
pm,
@@ -661,7 +666,7 @@ public class TestGeoPolygon extends LuceneTestCase {
boolean illegalArgumentException = false;
try {
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points, null);
+ final GeoPolygon p = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points, null);
} catch (IllegalArgumentException e) {
illegalArgumentException = true;
}
@@ -694,7 +699,7 @@ public class TestGeoPolygon extends LuceneTestCase {
boolean illegalArgumentException = false;
try {
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points, null);
+ final GeoPolygon p = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, points, null);
} catch (IllegalArgumentException e) {
illegalArgumentException = true;
}
@@ -724,17 +729,18 @@ public class TestGeoPolygon extends LuceneTestCase {
final GeoCompositePolygon rval = new GeoCompositePolygon(PlanetModel.WGS84);
final GeoPolygonFactory.MutableBoolean mutableBoolean = new GeoPolygonFactory.MutableBoolean();
- GeoPolygonFactory.buildPolygonShape(
- rval,
- mutableBoolean,
- PlanetModel.WGS84,
- points,
- internal,
- 0,
- 1,
- new SidedPlane(p1, p3, p2),
- new ArrayList<GeoPolygon>(),
- null);
+ boolean result =
+ GeoPolygonFactory.buildPolygonShape(
+ rval,
+ mutableBoolean,
+ PlanetModel.WGS84,
+ points,
+ internal,
+ 0,
+ 1,
+ new SidedPlane(p1, p3, p2),
+ new ArrayList<GeoPolygon>(),
+ null);
assertFalse(mutableBoolean.value);
}
@@ -764,7 +770,7 @@ public class TestGeoPolygon extends LuceneTestCase {
shapeList.add(desc);
- GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, shapeList);
+ GeoPolygon p = GeoPolygonFactory.makeLargeGeoPolygon(PlanetModel.WGS84, shapeList);
}
@Test
@@ -848,6 +854,8 @@ public class TestGeoPolygon extends LuceneTestCase {
*/
final GeoPoint point = new GeoPoint(PlanetModel.WGS84, -0.41518838180529244, 3.141592653589793);
+ final GeoPoint encodedPoint =
+ new GeoPoint(-0.9155623168963972, 2.3309121299774915E-10, -0.40359240449795253);
assertTrue(p.isWithin(point) ? solid.isWithin(point) : true);
}
@@ -1032,7 +1040,7 @@ public class TestGeoPolygon extends LuceneTestCase {
boolean result;
try {
- new GeoConvexPolygon(PlanetModel.WGS84, poly2List);
+ final GeoConvexPolygon poly2 = new GeoConvexPolygon(PlanetModel.WGS84, poly2List);
result = true;
} catch (IllegalArgumentException e) {
result = false;
@@ -1389,9 +1397,9 @@ public class TestGeoPolygon extends LuceneTestCase {
points.add(
new GeoPoint(
PlanetModel.SPHERE, Geo3DUtil.fromDegrees(64.53775), Geo3DUtil.fromDegrees(-52.19148)));
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+ GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
Collections.reverse(points);
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+ polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
}
@Test
@@ -1487,9 +1495,9 @@ public class TestGeoPolygon extends LuceneTestCase {
points.add(
new GeoPoint(
PlanetModel.SPHERE, Geo3DUtil.fromDegrees(50.455467), Geo3DUtil.fromDegrees(-3.48905)));
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+ GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
Collections.reverse(points);
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+ polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
}
/*
@@ -1574,6 +1582,7 @@ public class TestGeoPolygon extends LuceneTestCase {
// Is the north pole in set, or out of set?
final GeoPoint northPole = new GeoPoint(PlanetModel.WGS84, Math.PI * 0.5, 0.0);
final GeoPoint negativeX = new GeoPoint(PlanetModel.WGS84, 0.0, Math.PI);
+ final GeoPoint negativeY = new GeoPoint(PlanetModel.WGS84, 0.0, -Math.PI * 0.5);
final GeoPoint positiveY = new GeoPoint(PlanetModel.WGS84, 0.0, Math.PI * 0.5);
final GeoPoint testPoint =
new GeoPoint(-0.074161727332972, 0.5686488061123504, 0.8178445379383386);
@@ -1720,6 +1729,11 @@ public class TestGeoPolygon extends LuceneTestCase {
// These are too close to parallel. The only solution is to prevent the poly from being
// created. Let's see if Geo3d thinks they are parallel.
+ final Plane p1 = new Plane(-1.224646799147353E-16, -1.0, -7.498798913309287E-33, 0.0);
+ final Plane p2 =
+ new Plane(-3.0261581679831E-12, -0.9999999999999999, -1.8529874570670608E-28, 0.0);
+ final Plane p3 = new Plane(4.234084035470679E-12, 1.0, -1.5172037954732973E-12, 0.0);
+
assertFalse(shape.isWithin(unquantized));
// This point is indeed outside the shape but it doesn't matter
@@ -2690,6 +2704,7 @@ public class TestGeoPolygon extends LuceneTestCase {
final GeoPolygonFactory.PolygonDescription description =
new GeoPolygonFactory.PolygonDescription(points);
+ final GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.WGS84, description);
final GeoPolygon largePolygon =
GeoPolygonFactory.makeLargeGeoPolygon(
PlanetModel.WGS84, Collections.singletonList(description));
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java
index ac5fd70..74fcfee 100644
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/TestRandomGeoPolygon.java
@@ -61,7 +61,7 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator {
points.add(point3);
points.add(point4);
try {
- GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
+ GeoPolygon polygon = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);
} catch (Exception e) {
fail(points.toString());
}
@@ -288,4 +288,18 @@ public class TestRandomGeoPolygon extends RandomGeo3dShapeGenerator {
}
return false;
}
+
+ private GeoPoint getCenterOfMass(final PlanetModel planetModel, final List<GeoPoint> points) {
+ double x = 0;
+ double y = 0;
+ double z = 0;
+ // get center of mass
+ for (final GeoPoint point : points) {
+ x += point.x;
+ y += point.y;
+ z += point.z;
+ }
+ // Normalization is not needed because createSurfacePoint does the scaling anyway.
+ return planetModel.createSurfacePoint(x, y, z);
+ }
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
index 8186fe3..4eaf4f5 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
@@ -97,7 +97,8 @@ import org.apache.lucene.util.fst.Util.TopResults;
*
* @lucene.experimental
*/
-public class AnalyzingSuggester extends Lookup {
+// redundant 'implements Accountable' to workaround javadocs bugs
+public class AnalyzingSuggester extends Lookup implements Accountable {
/**
* FST<Weight,Surface>: input is the analyzed form, with a null byte between terms weights
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
index 4e5ff17..11c57a7 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
@@ -100,7 +100,8 @@ import org.apache.lucene.util.fst.Util.TopResults;
*
* @lucene.experimental
*/
-public class FreeTextSuggester extends Lookup {
+// redundant 'implements Accountable' to workaround javadocs bugs
+public class FreeTextSuggester extends Lookup implements Accountable {
/** Codec name used in the header for the saved model. */
public static final String CODEC_NAME = "freetextsuggest";
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
index f06f5f5..47a8035 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/NRTSuggester.java
@@ -364,7 +364,7 @@ public final class NRTSuggester implements Accountable {
* Label used to denote the end of an input in the FST and
* the beginning of dedup bytes
*/
- input.readVInt(); // endByte
+ int endByte = input.readVInt();
int payloadSep = input.readVInt();
return new NRTSuggester(fst, maxAnalyzedPathsPerOutput, payloadSep);
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java
index 59335f1..fcf72b7 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java
@@ -68,7 +68,7 @@ import org.apache.lucene.util.fst.NoOutputs;
* @see FSTCompletion
* @lucene.experimental
*/
-public class FSTCompletionLookup extends Lookup {
+public class FSTCompletionLookup extends Lookup implements Accountable {
/**
* An invalid bucket count if we're creating an object of this class from an existing FST.
*
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
index a9c3924..1b3661b 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
@@ -57,7 +57,8 @@ import org.apache.lucene.util.fst.Util.TopResults;
*
* @lucene.experimental
*/
-public class WFSTCompletionLookup extends Lookup {
+// redundant 'implements Accountable' to workaround javadocs bugs
+public class WFSTCompletionLookup extends Lookup implements Accountable {
/** FST<Long>, weights are encoded as costs: (Integer.MAX_VALUE-weight) */
// NOTE: like FSTSuggester, this is really a WFSA, if you want to
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
index f94c68d..1cf3666 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
@@ -25,6 +25,7 @@ import org.apache.lucene.search.suggest.Lookup;
import org.apache.lucene.search.suggest.jaspell.JaspellTernarySearchTrie.TSTNode;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
@@ -36,7 +37,7 @@ import org.apache.lucene.util.CharsRefBuilder;
* @deprecated Migrate to one of the newer suggesters which are much more RAM efficient.
*/
@Deprecated
-public class JaspellLookup extends Lookup {
+public class JaspellLookup extends Lookup implements Accountable {
JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie();
private boolean usePrefix = true;
private int editDistance = 2;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
index 92beb05..3f5d746 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
@@ -28,12 +28,14 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.store.Directory;
@@ -156,6 +158,23 @@ public abstract class CollationTestBase extends LuceneTestCase {
farsiIndex.close();
}
+ // Make sure the documents returned by the search match the expected list
+ // Copied from TestSort.java
+ private void assertMatches(IndexSearcher searcher, Query query, Sort sort, String expectedResult)
+ throws IOException {
+ ScoreDoc[] result = searcher.search(query, 1000, sort).scoreDocs;
+ StringBuilder buff = new StringBuilder(10);
+ int n = result.length;
+ for (int i = 0; i < n; ++i) {
+ Document doc = searcher.doc(result[i].doc);
+ IndexableField[] v = doc.getFields("tracer");
+ for (int j = 0; j < v.length; ++j) {
+ buff.append(v[j].stringValue());
+ }
+ }
+ assertEquals(expectedResult, buff.toString());
+ }
+
public void assertThreadSafe(final Analyzer analyzer) throws Exception {
int numTestPoints = 100;
int numThreads = TestUtil.nextInt(random(), 3, 5);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
index 27cedfe..2b85a9a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingDocValuesFormat.java
@@ -82,7 +82,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat {
assert docID >= 0 && docID < maxDoc;
assert docID > lastDocID;
lastDocID = docID;
- values.longValue();
+ long value = values.longValue();
}
in.addNumericField(field, valuesProducer);
@@ -146,6 +146,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat {
throws IOException {
SortedNumericDocValues values = valuesProducer.getSortedNumeric(field);
+ long valueCount = 0;
int lastDocID = -1;
while (true) {
int docID = values.nextDoc();
@@ -156,6 +157,7 @@ public class AssertingDocValuesFormat extends DocValuesFormat {
lastDocID = values.docID();
int count = values.docValueCount();
assert count > 0;
+ valueCount += count;
long previous = Long.MIN_VALUE;
for (int i = 0; i < count; i++) {
long nextValue = values.nextValue();
@@ -183,12 +185,14 @@ public class AssertingDocValuesFormat extends DocValuesFormat {
lastValue = BytesRef.deepCopyOf(b);
}
+ int docCount = 0;
LongBitSet seenOrds = new LongBitSet(valueCount);
while (true) {
int docID = values.nextDoc();
if (docID == NO_MORE_DOCS) {
break;
}
+ docCount++;
long lastOrd = -1;
while (true) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java
index ae10d84..b6683d7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingNormsFormat.java
@@ -69,7 +69,7 @@ public class AssertingNormsFormat extends NormsFormat {
assert docID >= 0 && docID < maxDoc;
assert docID > lastDocID;
lastDocID = docID;
- values.longValue();
+ long value = values.longValue();
}
in.addNormsField(field, valuesProducer);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 95e9b88..a95c346 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -357,6 +357,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
public void testLevel2Ghosts() throws Exception {
Directory dir = newDirectory();
+ Analyzer analyzer = new MockAnalyzer(random());
IndexWriterConfig iwc = newIndexWriterConfig(null);
iwc.setCodec(getCodec());
iwc.setMergePolicy(newLogMergePolicy());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
index 0d0604e..5f958cf 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
@@ -428,6 +428,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
// to test reuse
private final ThreadLocal<PostingsEnum> docsEnum = new ThreadLocal<>();
+ private final ThreadLocal<PostingsEnum> docsAndPositionsEnum = new ThreadLocal<>();
protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
assertEquals(1, terms.getDocCount());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
index 275cbee..2ecfe45 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
@@ -298,6 +298,28 @@ public class RandomCodec extends AssertingCodec {
this.random = new Random(randomSeed);
}
+ private static boolean getRandomSingleValuePerDoc(boolean singleValuePerDoc, int randomSeed) {
+ // If we are single valued, sometimes pretend we aren't:
+ return singleValuePerDoc && (new Random(randomSeed).nextBoolean());
+ }
+
+ private static boolean getRandomLongOrds(
+ long totalPointCount, boolean singleValuePerDoc, int randomSeed) {
+ // Always use long ords if we have too many points, but sometimes randomly use it anyway when
+ // singleValuePerDoc is false:
+ return totalPointCount > Integer.MAX_VALUE
+ || (getRandomSingleValuePerDoc(singleValuePerDoc, randomSeed) == false
+ && new Random(randomSeed).nextBoolean());
+ }
+
+ private static long getRandomOfflineSorterBufferMB(int randomSeed) {
+ return TestUtil.nextInt(new Random(randomSeed), 1, 8);
+ }
+
+ private static int getRandomOfflineSorterMaxTempFiles(int randomSeed) {
+ return TestUtil.nextInt(new Random(randomSeed), 2, 20);
+ }
+
@Override
protected int split(byte[] minPackedValue, byte[] maxPackedValue, int[] parentDims) {
// BKD normally defaults by the widest dimension, to try to make as squarish cells as
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
index 6883e82..cc8a7a7 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
@@ -65,6 +65,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
public class RandomPostingsTester {
private static final IntToLongFunction DOC_TO_NORM = doc -> 1 + (doc & 0x0f);
+ private static final long MAX_NORM = 0x10;
/** Which features to test. */
public enum Option {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index cd4c03c..08d18bc 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -2172,6 +2172,29 @@ public abstract class LuceneTestCase extends Assert {
}
}
+ private static class RandomBits implements Bits {
+ FixedBitSet bits;
+
+ RandomBits(int maxDoc, double pctLive, Random random) {
+ bits = new FixedBitSet(maxDoc);
+ for (int i = 0; i < maxDoc; i++) {
+ if (random.nextDouble() <= pctLive) {
+ bits.set(i);
+ }
+ }
+ }
+
+ @Override
+ public boolean get(int index) {
+ return bits.get(index);
+ }
+
+ @Override
+ public int length() {
+ return bits.length();
+ }
+ }
+
/**
* checks the terms enum sequentially if deep is false, it does a 'shallow' test that doesnt go
* down to the docsenums
diff --git a/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java b/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java
index 6b4a597..d29b28c 100644
--- a/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java
+++ b/lucene/test-framework/src/test/org/apache/lucene/util/TestExpectThrows.java
@@ -55,11 +55,12 @@ public class TestExpectThrows extends LuceneTestCase {
final AtomicBoolean ran = new AtomicBoolean(false);
AssertionError caught = null;
try {
- expectThrows(
- IOException.class,
- () -> {
- ran.getAndSet(true);
- });
+ final IOException returned =
+ expectThrows(
+ IOException.class,
+ () -> {
+ ran.getAndSet(true);
+ });
fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows
} catch (AssertionError ae) {
caught = ae;
@@ -77,12 +78,13 @@ public class TestExpectThrows extends LuceneTestCase {
final AtomicBoolean ran = new AtomicBoolean(false);
AssertionError caught = null;
try {
- expectThrows(
- IOException.class,
- () -> {
- ran.getAndSet(true);
- fail("this failure should propogate");
- });
+ final IOException returned =
+ expectThrows(
+ IOException.class,
+ () -> {
+ ran.getAndSet(true);
+ fail("this failure should propogate");
+ });
fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows
} catch (AssertionError ae) {
caught = ae;
@@ -101,12 +103,13 @@ public class TestExpectThrows extends LuceneTestCase {
final AtomicBoolean ran = new AtomicBoolean(false);
AssumptionViolatedException caught = null;
try {
- expectThrows(
- IOException.class,
- () -> {
- ran.getAndSet(true);
- assumeTrue("this assumption should propogate", false);
- });
+ final IOException returned =
+ expectThrows(
+ IOException.class,
+ () -> {
+ ran.getAndSet(true);
+ assumeTrue("this assumption should propogate", false);
+ });
fail("must not complete"); // NOTE: we don't use expectThrows to test expectThrows
} catch (AssumptionViolatedException ave) {
caught = ave;