Posted to commits@lucene.apache.org by dw...@apache.org on 2020/12/23 11:42:06 UTC

[lucene-solr] branch master updated (98f12f4 -> 8c234b2)

This is an automated email from the ASF dual-hosted git repository.

dweiss pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from 98f12f4  SOLR-15031 Prevent null being wrapped in a QueryValueSource
     new 2d6ad2f  LUCENE-9570: code reformatting [partial].
     new 8c234b2  LUCENE-9570: code reformatting [record rev].

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
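
The "[record rev]" commit adds the reformatting revision to
.git-blame-ignore-revs (the first entry in the summary below). That
file is conventionally consumed by "git blame" so a bulk reformat like
this one does not clobber line attribution. A minimal sketch of how a
clone would use it follows; the git options are standard (git 2.23+),
and their use here is an illustration, not something stated in this
email ("<file>" is a placeholder for any path in the summary):

    # One-off: ignore the listed revisions for a single blame.
    git blame --ignore-revs-file .git-blame-ignore-revs <file>

    # Persistent: have every blame in this clone consult the file.
    git config blame.ignoreRevsFile .git-blame-ignore-revs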


Summary of changes:
 .git-blame-ignore-revs                             |    1 +
 dev-tools/scripts/checkJavadocLinks.py             |    4 +-
 gradle/generation/util.gradle                      |   17 +
 gradle/validation/spotless.gradle                  |   55 +-
 .../lucene50/Lucene50SkipWriter.java               |    3 +-
 .../src/java/org/apache/lucene/LucenePackage.java  |    5 +-
 .../lucene/analysis/AbstractAnalysisFactory.java   |  197 +-
 .../apache/lucene/analysis/AnalysisSPILoader.java  |  161 +-
 .../java/org/apache/lucene/analysis/Analyzer.java  |  417 ++-
 .../apache/lucene/analysis/AnalyzerWrapper.java    |  112 +-
 .../apache/lucene/analysis/CachingTokenFilter.java |   37 +-
 .../org/apache/lucene/analysis/CharArrayMap.java   |  359 +-
 .../org/apache/lucene/analysis/CharArraySet.java   |  136 +-
 .../org/apache/lucene/analysis/CharFilter.java     |   56 +-
 .../apache/lucene/analysis/CharFilterFactory.java  |   43 +-
 .../org/apache/lucene/analysis/CharacterUtils.java |  145 +-
 .../lucene/analysis/DelegatingAnalyzerWrapper.java |   56 +-
 .../lucene/analysis/FilteringTokenFilter.java      |   20 +-
 .../apache/lucene/analysis/GraphTokenFilter.java   |   36 +-
 .../apache/lucene/analysis/LowerCaseFilter.java    |   15 +-
 .../lucene/analysis/ReusableStringReader.java      |   18 +-
 .../org/apache/lucene/analysis/StopFilter.java     |   65 +-
 .../lucene/analysis/StopwordAnalyzerBase.java      |   97 +-
 .../org/apache/lucene/analysis/TokenFilter.java    |   40 +-
 .../apache/lucene/analysis/TokenFilterFactory.java |   44 +-
 .../org/apache/lucene/analysis/TokenStream.java    |  222 +-
 .../lucene/analysis/TokenStreamToAutomaton.java    |   58 +-
 .../java/org/apache/lucene/analysis/Tokenizer.java |   78 +-
 .../apache/lucene/analysis/TokenizerFactory.java   |   46 +-
 .../org/apache/lucene/analysis/WordlistLoader.java |  117 +-
 .../org/apache/lucene/analysis/package-info.java   | 1139 +++---
 .../org/apache/lucene/codecs/BlockTermState.java   |   20 +-
 .../src/java/org/apache/lucene/codecs/Codec.java   |  104 +-
 .../java/org/apache/lucene/codecs/CodecUtil.java   |  504 +--
 .../codecs/CompetitiveImpactAccumulator.java       |   41 +-
 .../apache/lucene/codecs/CompoundDirectory.java    |   35 +-
 .../org/apache/lucene/codecs/CompoundFormat.java   |   18 +-
 .../apache/lucene/codecs/DocValuesConsumer.java    | 1035 +++---
 .../org/apache/lucene/codecs/DocValuesFormat.java  |  106 +-
 .../apache/lucene/codecs/DocValuesProducer.java    |   74 +-
 .../org/apache/lucene/codecs/FieldInfosFormat.java |   26 +-
 .../org/apache/lucene/codecs/FieldsConsumer.java   |   78 +-
 .../org/apache/lucene/codecs/FieldsProducer.java   |   37 +-
 .../java/org/apache/lucene/codecs/FilterCodec.java |   28 +-
 .../org/apache/lucene/codecs/LiveDocsFormat.java   |   31 +-
 .../lucene/codecs/MultiLevelSkipListReader.java    |  123 +-
 .../lucene/codecs/MultiLevelSkipListWriter.java    |   78 +-
 .../apache/lucene/codecs/MutablePointValues.java   |   10 +-
 .../org/apache/lucene/codecs/NormsConsumer.java    |  228 +-
 .../java/org/apache/lucene/codecs/NormsFormat.java |   30 +-
 .../org/apache/lucene/codecs/NormsProducer.java    |   41 +-
 .../org/apache/lucene/codecs/PointsFormat.java     |   80 +-
 .../org/apache/lucene/codecs/PointsReader.java     |   25 +-
 .../org/apache/lucene/codecs/PointsWriter.java     |   89 +-
 .../org/apache/lucene/codecs/PostingsFormat.java   |  105 +-
 .../apache/lucene/codecs/PostingsReaderBase.java   |   63 +-
 .../apache/lucene/codecs/PostingsWriterBase.java   |   61 +-
 .../lucene/codecs/PushPostingsWriterBase.java      |   78 +-
 .../apache/lucene/codecs/SegmentInfoFormat.java    |   25 +-
 .../apache/lucene/codecs/StoredFieldsFormat.java   |   24 +-
 .../apache/lucene/codecs/StoredFieldsReader.java   |   38 +-
 .../apache/lucene/codecs/StoredFieldsWriter.java   |   99 +-
 .../java/org/apache/lucene/codecs/TermStats.java   |   11 +-
 .../apache/lucene/codecs/TermVectorsFormat.java    |   25 +-
 .../apache/lucene/codecs/TermVectorsReader.java    |   48 +-
 .../apache/lucene/codecs/TermVectorsWriter.java    |  184 +-
 .../org/apache/lucene/codecs/VectorFormat.java     |   57 +-
 .../org/apache/lucene/codecs/VectorReader.java     |   22 +-
 .../org/apache/lucene/codecs/VectorWriter.java     |   49 +-
 .../codecs/blocktree/BlockTreeTermsReader.java     |  175 +-
 .../codecs/blocktree/BlockTreeTermsWriter.java     |  559 +--
 .../codecs/blocktree/CompressionAlgorithm.java     |   15 +-
 .../lucene/codecs/blocktree/FieldReader.java       |   82 +-
 .../codecs/blocktree/IntersectTermsEnum.java       |  104 +-
 .../codecs/blocktree/IntersectTermsEnumFrame.java  |   21 +-
 .../lucene/codecs/blocktree/SegmentTermsEnum.java  |  462 ++-
 .../codecs/blocktree/SegmentTermsEnumFrame.java    |  180 +-
 .../org/apache/lucene/codecs/blocktree/Stats.java  |  137 +-
 .../lucene/codecs/blocktree/package-info.java      |   20 +-
 .../compressing/CompressingStoredFieldsFormat.java |  141 +-
 .../compressing/CompressingStoredFieldsReader.java |  267 +-
 .../compressing/CompressingStoredFieldsWriter.java |  323 +-
 .../compressing/CompressingTermVectorsFormat.java  |   79 +-
 .../compressing/CompressingTermVectorsReader.java  |  375 +-
 .../compressing/CompressingTermVectorsWriter.java  |  249 +-
 .../lucene/codecs/compressing/CompressionMode.java |  209 +-
 .../lucene/codecs/compressing/Compressor.java      |   13 +-
 .../lucene/codecs/compressing/Decompressor.java    |   21 +-
 .../lucene/codecs/compressing/FieldsIndex.java     |    2 -
 .../codecs/compressing/FieldsIndexReader.java      |   46 +-
 .../codecs/compressing/FieldsIndexWriter.java      |   45 +-
 .../compressing/LegacyFieldsIndexReader.java       |   38 +-
 .../lucene/codecs/compressing/MatchingReaders.java |   23 +-
 .../lucene/codecs/compressing/package-info.java    |    6 +-
 .../codecs/lucene50/Lucene50CompoundFormat.java    |   76 +-
 .../codecs/lucene50/Lucene50CompoundReader.java    |  101 +-
 .../codecs/lucene50/Lucene50LiveDocsFormat.java    |   81 +-
 .../codecs/lucene50/Lucene50TermVectorsFormat.java |  208 +-
 .../lucene/codecs/lucene50/package-info.java       |    5 +-
 .../codecs/lucene60/Lucene60FieldInfosFormat.java  |  304 +-
 .../lucene/codecs/lucene60/package-info.java       |    4 +-
 .../apache/lucene/codecs/lucene80/IndexedDISI.java |  369 +-
 .../codecs/lucene80/Lucene80DocValuesConsumer.java |  319 +-
 .../codecs/lucene80/Lucene80DocValuesFormat.java   |  177 +-
 .../codecs/lucene80/Lucene80DocValuesProducer.java |  327 +-
 .../codecs/lucene80/Lucene80NormsConsumer.java     |   37 +-
 .../codecs/lucene80/Lucene80NormsFormat.java       |   95 +-
 .../codecs/lucene80/Lucene80NormsProducer.java     |  103 +-
 .../lucene/codecs/lucene80/package-info.java       |    5 +-
 .../lucene/codecs/lucene84/ForDeltaUtil.java       |   21 +-
 .../org/apache/lucene/codecs/lucene84/ForUtil.java | 1197 +++----
 .../codecs/lucene84/Lucene84PostingsFormat.java    |  635 ++--
 .../codecs/lucene84/Lucene84PostingsReader.java    |  486 ++-
 .../codecs/lucene84/Lucene84PostingsWriter.java    |  128 +-
 .../codecs/lucene84/Lucene84ScoreSkipReader.java   |   59 +-
 .../lucene/codecs/lucene84/Lucene84SkipReader.java |   66 +-
 .../lucene/codecs/lucene84/Lucene84SkipWriter.java |   69 +-
 .../apache/lucene/codecs/lucene84/PForUtil.java    |   35 +-
 .../lucene/codecs/lucene84/package-info.java       |    4 +-
 .../codecs/lucene86/Lucene86PointsFormat.java      |   29 +-
 .../codecs/lucene86/Lucene86PointsReader.java      |   49 +-
 .../codecs/lucene86/Lucene86PointsWriter.java      |  156 +-
 .../codecs/lucene86/Lucene86SegmentInfoFormat.java |  107 +-
 .../lucene/codecs/lucene86/package-info.java       |    4 +-
 .../codecs/lucene87/BugfixDeflater_JDK8252739.java |  216 +-
 .../DeflateWithPresetDictCompressionMode.java      |   32 +-
 .../lucene87/LZ4WithPresetDictCompressionMode.java |   15 +-
 .../lucene87/Lucene87StoredFieldsFormat.java       |  146 +-
 .../lucene/codecs/lucene87/package-info.java       |    4 +-
 .../lucene/codecs/lucene90/Lucene90Codec.java      |   83 +-
 .../codecs/lucene90/Lucene90FieldInfosFormat.java  |  329 +-
 .../codecs/lucene90/Lucene90VectorFormat.java      |    5 +-
 .../codecs/lucene90/Lucene90VectorReader.java      |  126 +-
 .../codecs/lucene90/Lucene90VectorWriter.java      |   81 +-
 .../lucene/codecs/lucene90/package-info.java       |  526 ++-
 .../org/apache/lucene/codecs/package-info.java     |   74 +-
 .../codecs/perfield/PerFieldDocValuesFormat.java   |  124 +-
 .../lucene/codecs/perfield/PerFieldMergeState.java |   46 +-
 .../codecs/perfield/PerFieldPostingsFormat.java    |  155 +-
 .../lucene/codecs/perfield/package-info.java       |    4 +-
 .../lucene/document/BinaryDocValuesField.java      |   33 +-
 .../org/apache/lucene/document/BinaryPoint.java    |  167 +-
 .../lucene/document/BinaryRangeDocValues.java      |    7 +-
 .../lucene/document/BinaryRangeDocValuesField.java |    5 +-
 .../document/BinaryRangeFieldRangeQuery.java       |   41 +-
 .../java/org/apache/lucene/document/DateTools.java |  166 +-
 .../java/org/apache/lucene/document/Document.java  |  172 +-
 .../document/DocumentStoredFieldVisitor.java       |   39 +-
 .../lucene/document/DoubleDocValuesField.java      |   25 +-
 .../org/apache/lucene/document/DoublePoint.java    |  182 +-
 .../org/apache/lucene/document/DoubleRange.java    |  121 +-
 .../lucene/document/DoubleRangeDocValuesField.java |   23 +-
 .../lucene/document/DoubleRangeSlowRangeQuery.java |   10 +-
 .../lucene/document/FeatureDoubleValuesSource.java |   25 +-
 .../org/apache/lucene/document/FeatureField.java   |  353 +-
 .../org/apache/lucene/document/FeatureQuery.java   |   20 +-
 .../apache/lucene/document/FeatureSortField.java   |   24 +-
 .../src/java/org/apache/lucene/document/Field.java |  308 +-
 .../java/org/apache/lucene/document/FieldType.java |  229 +-
 .../lucene/document/FloatDocValuesField.java       |   26 +-
 .../org/apache/lucene/document/FloatPoint.java     |  176 +-
 .../org/apache/lucene/document/FloatRange.java     |  121 +-
 .../lucene/document/FloatRangeDocValuesField.java  |   23 +-
 .../lucene/document/FloatRangeSlowRangeQuery.java  |    7 +-
 .../apache/lucene/document/InetAddressPoint.java   |  172 +-
 .../apache/lucene/document/InetAddressRange.java   |   61 +-
 .../java/org/apache/lucene/document/IntPoint.java  |  159 +-
 .../java/org/apache/lucene/document/IntRange.java  |  121 +-
 .../lucene/document/IntRangeDocValuesField.java    |   26 +-
 .../lucene/document/IntRangeSlowRangeQuery.java    |    6 +-
 .../lucene/document/LatLonDocValuesBoxQuery.java   |   88 +-
 .../lucene/document/LatLonDocValuesField.java      |  167 +-
 .../LatLonDocValuesPointInGeometryQuery.java       |   60 +-
 .../org/apache/lucene/document/LatLonPoint.java    |  180 +-
 .../document/LatLonPointDistanceComparator.java    |   72 +-
 .../document/LatLonPointDistanceFeatureQuery.java  |  321 +-
 .../lucene/document/LatLonPointDistanceQuery.java  |  119 +-
 .../document/LatLonPointInGeometryQuery.java       |  158 +-
 .../lucene/document/LatLonPointSortField.java      |   15 +-
 .../org/apache/lucene/document/LatLonShape.java    |  140 +-
 .../document/LatLonShapeBoundingBoxQuery.java      |  459 ++-
 .../apache/lucene/document/LatLonShapeQuery.java   |  200 +-
 .../lucene/document/LongDistanceFeatureQuery.java  |  163 +-
 .../java/org/apache/lucene/document/LongPoint.java |  186 +-
 .../java/org/apache/lucene/document/LongRange.java |  117 +-
 .../lucene/document/LongRangeDocValuesField.java   |   23 +-
 .../lucene/document/LongRangeSlowRangeQuery.java   |    7 +-
 .../lucene/document/NumericDocValuesField.java     |   66 +-
 .../apache/lucene/document/RangeFieldQuery.java    |  319 +-
 .../org/apache/lucene/document/ShapeField.java     |  147 +-
 .../org/apache/lucene/document/ShapeQuery.java     |  310 +-
 .../lucene/document/SortedDocValuesField.java      |   68 +-
 .../document/SortedNumericDocValuesField.java      |   79 +-
 .../document/SortedNumericDocValuesRangeQuery.java |   68 +-
 .../lucene/document/SortedSetDocValuesField.java   |   72 +-
 .../document/SortedSetDocValuesRangeQuery.java     |   80 +-
 .../org/apache/lucene/document/StoredField.java    |   88 +-
 .../org/apache/lucene/document/StringField.java    |   46 +-
 .../java/org/apache/lucene/document/TextField.java |   24 +-
 .../org/apache/lucene/document/VectorField.java    |   57 +-
 .../apache/lucene/document/XYDocValuesField.java   |  141 +-
 .../document/XYDocValuesPointInGeometryQuery.java  |   44 +-
 .../lucene/document/XYPointDistanceComparator.java |   43 +-
 .../org/apache/lucene/document/XYPointField.java   |   89 +-
 .../lucene/document/XYPointInGeometryQuery.java    |   94 +-
 .../apache/lucene/document/XYPointSortField.java   |   12 +-
 .../java/org/apache/lucene/document/XYShape.java   |  105 +-
 .../org/apache/lucene/document/XYShapeQuery.java   |  177 +-
 .../org/apache/lucene/document/package-info.java   |   55 +-
 .../src/java/org/apache/lucene/geo/Circle.java     |   18 +-
 .../src/java/org/apache/lucene/geo/Circle2D.java   |  219 +-
 .../java/org/apache/lucene/geo/Component2D.java    |  215 +-
 .../java/org/apache/lucene/geo/ComponentTree.java  |  161 +-
 .../src/java/org/apache/lucene/geo/EdgeTree.java   |  264 +-
 .../org/apache/lucene/geo/GeoEncodingUtils.java    |  172 +-
 .../src/java/org/apache/lucene/geo/GeoUtils.java   |  133 +-
 .../java/org/apache/lucene/geo/LatLonGeometry.java |    4 +-
 .../core/src/java/org/apache/lucene/geo/Line.java  |   22 +-
 .../src/java/org/apache/lucene/geo/Line2D.java     |  113 +-
 .../core/src/java/org/apache/lucene/geo/Point.java |   16 +-
 .../src/java/org/apache/lucene/geo/Point2D.java    |   99 +-
 .../src/java/org/apache/lucene/geo/Polygon.java    |   69 +-
 .../src/java/org/apache/lucene/geo/Polygon2D.java  |  169 +-
 .../src/java/org/apache/lucene/geo/Rectangle.java  |   40 +-
 .../java/org/apache/lucene/geo/Rectangle2D.java    |  148 +-
 .../lucene/geo/SimpleGeoJSONPolygonParser.java     |   81 +-
 .../apache/lucene/geo/SimpleWKTShapeParser.java    |  112 +-
 .../java/org/apache/lucene/geo/Tessellator.java    |  782 ++--
 .../src/java/org/apache/lucene/geo/XYCircle.java   |   16 +-
 .../org/apache/lucene/geo/XYEncodingUtils.java     |   12 +-
 .../src/java/org/apache/lucene/geo/XYGeometry.java |    4 +-
 .../src/java/org/apache/lucene/geo/XYLine.java     |   18 +-
 .../src/java/org/apache/lucene/geo/XYPoint.java    |   16 +-
 .../src/java/org/apache/lucene/geo/XYPolygon.java  |   42 +-
 .../java/org/apache/lucene/geo/XYRectangle.java    |    7 +-
 .../java/org/apache/lucene/geo/package-info.java   |    6 +-
 .../apache/lucene/index/AutomatonTermsEnum.java    |  181 +-
 .../apache/lucene/index/BaseCompositeReader.java   |  110 +-
 .../org/apache/lucene/index/BaseTermsEnum.java     |   19 +-
 .../org/apache/lucene/index/BinaryDocValues.java   |   16 +-
 .../lucene/index/BinaryDocValuesFieldUpdates.java  |   34 +-
 .../apache/lucene/index/BinaryDocValuesWriter.java |   78 +-
 .../java/org/apache/lucene/index/BitsSlice.java    |    8 +-
 .../org/apache/lucene/index/BufferedUpdates.java   |   92 +-
 .../apache/lucene/index/BufferedUpdatesStream.java |  135 +-
 .../org/apache/lucene/index/ByteSliceReader.java   |   36 +-
 .../org/apache/lucene/index/ByteSliceWriter.java   |   22 +-
 .../java/org/apache/lucene/index/CheckIndex.java   | 2289 ++++++++----
 .../java/org/apache/lucene/index/CodecReader.java  |   98 +-
 .../org/apache/lucene/index/CompositeReader.java   |   84 +-
 .../lucene/index/CompositeReaderContext.java       |   65 +-
 .../lucene/index/ConcurrentMergeScheduler.java     |  321 +-
 .../apache/lucene/index/CorruptIndexException.java |   21 +-
 .../org/apache/lucene/index/DirectoryReader.java   |  342 +-
 .../java/org/apache/lucene/index/DocIDMerger.java  |   53 +-
 .../java/org/apache/lucene/index/DocValues.java    |  196 +-
 .../apache/lucene/index/DocValuesFieldUpdates.java |  171 +-
 .../org/apache/lucene/index/DocValuesIterator.java |   14 +-
 .../apache/lucene/index/DocValuesLeafReader.java   |    1 -
 .../org/apache/lucene/index/DocValuesType.java     |   45 +-
 .../org/apache/lucene/index/DocValuesUpdate.java   |   42 +-
 .../org/apache/lucene/index/DocValuesWriter.java   |    6 +-
 .../org/apache/lucene/index/DocsWithFieldSet.java  |   13 +-
 .../org/apache/lucene/index/DocumentsWriter.java   |  345 +-
 .../lucene/index/DocumentsWriterDeleteQueue.java   |  196 +-
 .../lucene/index/DocumentsWriterFlushControl.java  |  388 +-
 .../lucene/index/DocumentsWriterFlushQueue.java    |   32 +-
 .../lucene/index/DocumentsWriterPerThread.java     |  318 +-
 .../lucene/index/DocumentsWriterPerThreadPool.java |   74 +-
 .../lucene/index/DocumentsWriterStallControl.java  |   57 +-
 .../lucene/index/EmptyDocValuesProducer.java       |    8 +-
 .../lucene/index/ExitableDirectoryReader.java      |  464 +--
 .../java/org/apache/lucene/index/FieldInfo.java    |  368 +-
 .../java/org/apache/lucene/index/FieldInfos.java   |  603 +++-
 .../org/apache/lucene/index/FieldInvertState.java  |   82 +-
 .../org/apache/lucene/index/FieldTermIterator.java |   23 +-
 .../apache/lucene/index/FieldUpdatesBuffer.java    |  139 +-
 .../src/java/org/apache/lucene/index/Fields.java   |   31 +-
 .../apache/lucene/index/FilterBinaryDocValues.java |   13 +-
 .../org/apache/lucene/index/FilterCodecReader.java |   38 +-
 .../apache/lucene/index/FilterDirectoryReader.java |   47 +-
 .../org/apache/lucene/index/FilterLeafReader.java  |   71 +-
 .../org/apache/lucene/index/FilterMergePolicy.java |   30 +-
 .../lucene/index/FilterNumericDocValues.java       |   12 +-
 .../apache/lucene/index/FilterSortedDocValues.java |    7 +-
 .../lucene/index/FilterSortedNumericDocValues.java |    7 +-
 .../lucene/index/FilterSortedSetDocValues.java     |    7 +-
 .../org/apache/lucene/index/FilteredTermsEnum.java |  165 +-
 .../lucene/index/FlushByRamOrCountsPolicy.java     |  105 +-
 .../java/org/apache/lucene/index/FlushPolicy.java  |   91 +-
 .../org/apache/lucene/index/FreqProxFields.java    |   43 +-
 .../apache/lucene/index/FreqProxTermsWriter.java   |  104 +-
 .../lucene/index/FreqProxTermsWriterPerField.java  |   77 +-
 .../apache/lucene/index/FrozenBufferedUpdates.java |  205 +-
 .../src/java/org/apache/lucene/index/Impact.java   |   17 +-
 .../src/java/org/apache/lucene/index/Impacts.java  |   28 +-
 .../java/org/apache/lucene/index/ImpactsEnum.java  |    5 +-
 .../org/apache/lucene/index/ImpactsSource.java     |   34 +-
 .../java/org/apache/lucene/index/IndexCommit.java  |   96 +-
 .../apache/lucene/index/IndexDeletionPolicy.java   |  106 +-
 .../org/apache/lucene/index/IndexFileDeleter.java  |  357 +-
 .../org/apache/lucene/index/IndexFileNames.java    |  118 +-
 .../lucene/index/IndexFormatTooNewException.java   |   67 +-
 .../lucene/index/IndexFormatTooOldException.java   |  106 +-
 .../lucene/index/IndexNotFoundException.java       |    9 +-
 .../java/org/apache/lucene/index/IndexOptions.java |   36 +-
 .../java/org/apache/lucene/index/IndexReader.java  |  435 ++-
 .../apache/lucene/index/IndexReaderContext.java    |   43 +-
 .../java/org/apache/lucene/index/IndexSorter.java  |  225 +-
 .../org/apache/lucene/index/IndexUpgrader.java     |  133 +-
 .../java/org/apache/lucene/index/IndexWriter.java  | 3726 +++++++++++---------
 .../org/apache/lucene/index/IndexWriterConfig.java |  363 +-
 .../org/apache/lucene/index/IndexableField.java    |   37 +-
 .../apache/lucene/index/IndexableFieldType.java    |  108 +-
 .../org/apache/lucene/index/IndexingChain.java     |  519 ++-
 .../index/KeepOnlyLastCommitDeletionPolicy.java    |   22 +-
 .../org/apache/lucene/index/KnnGraphValues.java    |   45 +-
 .../java/org/apache/lucene/index/LeafMetaData.java |   24 +-
 .../java/org/apache/lucene/index/LeafReader.java   |  204 +-
 .../org/apache/lucene/index/LeafReaderContext.java |   30 +-
 .../apache/lucene/index/LiveIndexWriterConfig.java |  302 +-
 .../lucene/index/LogByteSizeMergePolicy.java       |  119 +-
 .../org/apache/lucene/index/LogDocMergePolicy.java |   40 +-
 .../org/apache/lucene/index/LogMergePolicy.java    |  383 +-
 .../org/apache/lucene/index/MappedMultiFields.java |   27 +-
 .../lucene/index/MappingMultiPostingsEnum.java     |   35 +-
 .../java/org/apache/lucene/index/MergePolicy.java  |  559 ++-
 .../org/apache/lucene/index/MergeRateLimiter.java  |   47 +-
 .../apache/lucene/index/MergeReaderWrapper.java    |   21 +-
 .../org/apache/lucene/index/MergeScheduler.java    |   62 +-
 .../java/org/apache/lucene/index/MergeState.java   |   93 +-
 .../java/org/apache/lucene/index/MergeTrigger.java |   34 +-
 .../java/org/apache/lucene/index/MultiBits.java    |   49 +-
 .../org/apache/lucene/index/MultiDocValues.java    |  261 +-
 .../java/org/apache/lucene/index/MultiFields.java  |   41 +-
 .../org/apache/lucene/index/MultiLeafReader.java   |    8 +-
 .../org/apache/lucene/index/MultiPostingsEnum.java |   55 +-
 .../java/org/apache/lucene/index/MultiReader.java  |   55 +-
 .../java/org/apache/lucene/index/MultiSorter.java  |   85 +-
 .../java/org/apache/lucene/index/MultiTerms.java   |   86 +-
 .../org/apache/lucene/index/MultiTermsEnum.java    |   80 +-
 .../org/apache/lucene/index/NoDeletionPolicy.java  |   10 +-
 .../org/apache/lucene/index/NoMergePolicy.java     |   42 +-
 .../org/apache/lucene/index/NoMergeScheduler.java  |   17 +-
 .../org/apache/lucene/index/NormValuesWriter.java  |   79 +-
 .../org/apache/lucene/index/NumericDocValues.java  |   16 +-
 .../lucene/index/NumericDocValuesFieldUpdates.java |   25 +-
 .../lucene/index/NumericDocValuesWriter.java       |   54 +-
 .../lucene/index/OneMergeWrappingMergePolicy.java  |   23 +-
 .../java/org/apache/lucene/index/OrdTermState.java |   11 +-
 .../java/org/apache/lucene/index/OrdinalMap.java   |  157 +-
 .../lucene/index/ParallelCompositeReader.java      |  101 +-
 .../apache/lucene/index/ParallelLeafReader.java    |  151 +-
 .../apache/lucene/index/ParallelPostingsArray.java |    2 +-
 .../org/apache/lucene/index/PendingDeletes.java    |  116 +-
 .../apache/lucene/index/PendingSoftDeletes.java    |   72 +-
 .../index/PersistentSnapshotDeletionPolicy.java    |  133 +-
 .../java/org/apache/lucene/index/PointValues.java  |  199 +-
 .../org/apache/lucene/index/PointValuesWriter.java |  268 +-
 .../java/org/apache/lucene/index/PostingsEnum.java |   88 +-
 .../org/apache/lucene/index/PrefixCodedTerms.java  |   27 +-
 .../java/org/apache/lucene/index/QueryTimeout.java |   17 +-
 .../org/apache/lucene/index/QueryTimeoutImpl.java  |   37 +-
 .../lucene/index/RandomAccessVectorValues.java     |   25 +-
 .../index/RandomAccessVectorValuesProducer.java    |    7 +-
 .../org/apache/lucene/index/ReaderManager.java     |   69 +-
 .../java/org/apache/lucene/index/ReaderPool.java   |  173 +-
 .../java/org/apache/lucene/index/ReaderSlice.java  |    3 +-
 .../java/org/apache/lucene/index/ReaderUtil.java   |   34 +-
 .../org/apache/lucene/index/ReadersAndUpdates.java |  383 +-
 .../org/apache/lucene/index/SegmentCommitInfo.java |  182 +-
 .../apache/lucene/index/SegmentCoreReaders.java    |  122 +-
 .../org/apache/lucene/index/SegmentDocValues.java  |   36 +-
 .../lucene/index/SegmentDocValuesProducer.java     |   32 +-
 .../java/org/apache/lucene/index/SegmentInfo.java  |  152 +-
 .../java/org/apache/lucene/index/SegmentInfos.java |  627 ++--
 .../org/apache/lucene/index/SegmentMerger.java     |  100 +-
 .../org/apache/lucene/index/SegmentReadState.java  |   41 +-
 .../org/apache/lucene/index/SegmentReader.java     |  157 +-
 .../org/apache/lucene/index/SegmentWriteState.java |   81 +-
 .../apache/lucene/index/SerialMergeScheduler.java  |   18 +-
 .../lucene/index/SimpleMergedSegmentWarmer.java    |   40 +-
 .../org/apache/lucene/index/SingleTermsEnum.java   |   19 +-
 .../index/SingletonSortedNumericDocValues.java     |   15 +-
 .../lucene/index/SingletonSortedSetDocValues.java  |   12 +-
 .../lucene/index/SlowCodecReaderWrapper.java       |   43 +-
 .../org/apache/lucene/index/SlowImpactsEnum.java   |   39 +-
 .../lucene/index/SnapshotDeletionPolicy.java       |  103 +-
 .../index/SoftDeletesDirectoryReaderWrapper.java   |  120 +-
 .../index/SoftDeletesRetentionMergePolicy.java     |  102 +-
 .../org/apache/lucene/index/SortFieldProvider.java |   55 +-
 .../org/apache/lucene/index/SortedDocValues.java   |   73 +-
 .../lucene/index/SortedDocValuesTermsEnum.java     |   11 +-
 .../apache/lucene/index/SortedDocValuesWriter.java |  109 +-
 .../lucene/index/SortedNumericDocValues.java       |   27 +-
 .../lucene/index/SortedNumericDocValuesWriter.java |   93 +-
 .../apache/lucene/index/SortedSetDocValues.java    |   75 +-
 .../lucene/index/SortedSetDocValuesTermsEnum.java  |   13 +-
 .../lucene/index/SortedSetDocValuesWriter.java     |  136 +-
 .../src/java/org/apache/lucene/index/Sorter.java   |  102 +-
 .../apache/lucene/index/SortingCodecReader.java    |  160 +-
 .../lucene/index/SortingStoredFieldsConsumer.java  |   87 +-
 .../lucene/index/SortingTermVectorsConsumer.java   |   69 +-
 .../lucene/index/StandardDirectoryReader.java      |  208 +-
 .../apache/lucene/index/StoredFieldVisitor.java    |   74 +-
 .../apache/lucene/index/StoredFieldsConsumer.java  |    7 +-
 .../src/java/org/apache/lucene/index/Term.java     |  144 +-
 .../java/org/apache/lucene/index/TermState.java    |   20 +-
 .../java/org/apache/lucene/index/TermStates.java   |  132 +-
 .../apache/lucene/index/TermVectorsConsumer.java   |   31 +-
 .../lucene/index/TermVectorsConsumerPerField.java  |   99 +-
 .../src/java/org/apache/lucene/index/Terms.java    |  125 +-
 .../java/org/apache/lucene/index/TermsEnum.java    |  313 +-
 .../java/org/apache/lucene/index/TermsHash.java    |   34 +-
 .../org/apache/lucene/index/TermsHashPerField.java |  104 +-
 .../org/apache/lucene/index/TieredMergePolicy.java |  500 ++-
 .../index/TrackingTmpOutputDirectoryWrapper.java   |    3 +-
 .../org/apache/lucene/index/TwoPhaseCommit.java    |   29 +-
 .../apache/lucene/index/TwoPhaseCommitTool.java    |   54 +-
 .../lucene/index/UpgradeIndexMergePolicy.java      |  108 +-
 .../java/org/apache/lucene/index/VectorValues.java |  165 +-
 .../apache/lucene/index/VectorValuesWriter.java    |   49 +-
 .../java/org/apache/lucene/index/package-info.java |  365 +-
 .../src/java/org/apache/lucene/package-info.java   |    6 +-
 .../org/apache/lucene/search/AutomatonQuery.java   |  101 +-
 .../org/apache/lucene/search/BlendedTermQuery.java |  139 +-
 .../lucene/search/BlockMaxConjunctionScorer.java   |   20 +-
 .../org/apache/lucene/search/BlockMaxDISI.java     |   10 +-
 .../lucene/search/Boolean2ScorerSupplier.java      |  104 +-
 .../org/apache/lucene/search/BooleanClause.java    |   75 +-
 .../org/apache/lucene/search/BooleanQuery.java     |  199 +-
 .../org/apache/lucene/search/BooleanScorer.java    |   74 +-
 .../org/apache/lucene/search/BooleanWeight.java    |  102 +-
 .../org/apache/lucene/search/BoostAttribute.java   |   22 +-
 .../apache/lucene/search/BoostAttributeImpl.java   |    9 +-
 .../java/org/apache/lucene/search/BoostQuery.java  |   39 +-
 .../java/org/apache/lucene/search/BulkScorer.java  |   71 +-
 .../org/apache/lucene/search/CachingCollector.java |  128 +-
 .../apache/lucene/search/CollectionStatistics.java |  124 +-
 .../search/CollectionTerminatedException.java      |   16 +-
 .../java/org/apache/lucene/search/Collector.java   |   61 +-
 .../org/apache/lucene/search/CollectorManager.java |   35 +-
 .../org/apache/lucene/search/ConjunctionDISI.java  |  156 +-
 .../apache/lucene/search/ConjunctionScorer.java    |    9 +-
 .../apache/lucene/search/ConstantScoreQuery.java   |   49 +-
 .../apache/lucene/search/ConstantScoreScorer.java  |   65 +-
 .../apache/lucene/search/ConstantScoreWeight.java  |   12 +-
 .../search/ControlledRealTimeReopenThread.java     |  113 +-
 .../apache/lucene/search/DisiPriorityQueue.java    |   15 +-
 .../java/org/apache/lucene/search/DisiWrapper.java |    7 +-
 .../search/DisjunctionDISIApproximation.java       |    9 +-
 .../lucene/search/DisjunctionMatchesIterator.java  |   68 +-
 .../apache/lucene/search/DisjunctionMaxQuery.java  |  123 +-
 .../apache/lucene/search/DisjunctionMaxScorer.java |   26 +-
 .../DisjunctionScoreBlockBoundaryPropagator.java   |   40 +-
 .../apache/lucene/search/DisjunctionScorer.java    |   44 +-
 .../apache/lucene/search/DisjunctionSumScorer.java |   14 +-
 .../java/org/apache/lucene/search/DocIdSet.java    |   73 +-
 .../org/apache/lucene/search/DocIdSetIterator.java |  112 +-
 .../lucene/search/DocValuesFieldExistsQuery.java   |   20 +-
 .../lucene/search/DocValuesRewriteMethod.java      |  160 +-
 .../org/apache/lucene/search/DoubleValues.java     |   38 +-
 .../apache/lucene/search/DoubleValuesSource.java   |  237 +-
 .../apache/lucene/search/ExactPhraseMatcher.java   |   68 +-
 .../java/org/apache/lucene/search/Explanation.java |   63 +-
 .../org/apache/lucene/search/FieldComparator.java  |  250 +-
 .../lucene/search/FieldComparatorSource.java       |   11 +-
 .../java/org/apache/lucene/search/FieldDoc.java    |   35 +-
 .../apache/lucene/search/FieldValueHitQueue.java   |   81 +-
 .../org/apache/lucene/search/FilterCollector.java  |    2 -
 .../apache/lucene/search/FilterLeafCollector.java  |    2 -
 .../lucene/search/FilterMatchesIterator.java       |    9 +-
 .../org/apache/lucene/search/FilterScorable.java   |   11 +-
 .../org/apache/lucene/search/FilterScorer.java     |   22 +-
 .../org/apache/lucene/search/FilterWeight.java     |   25 +-
 .../lucene/search/FilteredDocIdSetIterator.java    |   12 +-
 .../lucene/search/FuzzyAutomatonBuilder.java       |   14 +-
 .../java/org/apache/lucene/search/FuzzyQuery.java  |  170 +-
 .../org/apache/lucene/search/FuzzyTermsEnum.java   |  171 +-
 .../java/org/apache/lucene/search/HitQueue.java    |   65 +-
 .../apache/lucene/search/HitsThresholdChecker.java |   23 +-
 .../java/org/apache/lucene/search/ImpactsDISI.java |   25 +-
 .../lucene/search/IndexOrDocValuesQuery.java       |   54 +-
 .../org/apache/lucene/search/IndexSearcher.java    |  688 ++--
 .../org/apache/lucene/search/LRUQueryCache.java    |  279 +-
 .../org/apache/lucene/search/LeafCollector.java    |   74 +-
 .../apache/lucene/search/LeafFieldComparator.java  |  119 +-
 .../org/apache/lucene/search/LeafSimScorer.java    |   31 +-
 .../org/apache/lucene/search/LiveFieldValues.java  |   50 +-
 .../java/org/apache/lucene/search/LongValues.java  |   10 +-
 .../org/apache/lucene/search/LongValuesSource.java |   65 +-
 .../apache/lucene/search/MatchAllDocsQuery.java    |   14 +-
 .../org/apache/lucene/search/MatchNoDocsQuery.java |   12 +-
 .../src/java/org/apache/lucene/search/Matches.java |   19 +-
 .../org/apache/lucene/search/MatchesIterator.java  |   36 +-
 .../org/apache/lucene/search/MatchesUtils.java     |   73 +-
 .../search/MaxNonCompetitiveBoostAttribute.java    |   34 +-
 .../MaxNonCompetitiveBoostAttributeImpl.java       |   17 +-
 .../apache/lucene/search/MaxScoreAccumulator.java  |   14 +-
 .../org/apache/lucene/search/MaxScoreCache.java    |   30 +-
 .../lucene/search/MaxScoreSumPropagator.java       |   22 +-
 .../lucene/search/MinShouldMatchSumScorer.java     |  182 +-
 .../org/apache/lucene/search/MultiCollector.java   |   69 +-
 .../lucene/search/MultiCollectorManager.java       |   30 +-
 .../lucene/search/MultiLeafFieldComparator.java    |    7 +-
 .../org/apache/lucene/search/MultiPhraseQuery.java |  193 +-
 .../org/apache/lucene/search/MultiTermQuery.java   |  317 +-
 .../search/MultiTermQueryConstantScoreWrapper.java |   66 +-
 .../java/org/apache/lucene/search/Multiset.java    |   10 +-
 .../org/apache/lucene/search/NGramPhraseQuery.java |   32 +-
 .../org/apache/lucene/search/NamedMatches.java     |   32 +-
 .../lucene/search/NormsFieldExistsQuery.java       |   16 +-
 .../org/apache/lucene/search/PhraseMatcher.java    |   47 +-
 .../org/apache/lucene/search/PhrasePositions.java  |   41 +-
 .../java/org/apache/lucene/search/PhraseQuery.java |  244 +-
 .../java/org/apache/lucene/search/PhraseQueue.java |    5 +-
 .../org/apache/lucene/search/PhraseScorer.java     |    1 -
 .../org/apache/lucene/search/PhraseWeight.java     |  142 +-
 .../org/apache/lucene/search/PointInSetQuery.java  |  167 +-
 .../org/apache/lucene/search/PointRangeQuery.java  |  184 +-
 .../lucene/search/PositiveScoresOnlyCollector.java |   12 +-
 .../java/org/apache/lucene/search/PrefixQuery.java |   23 +-
 .../src/java/org/apache/lucene/search/Query.java   |  109 +-
 .../java/org/apache/lucene/search/QueryCache.java  |    9 +-
 .../apache/lucene/search/QueryCachingPolicy.java   |   23 +-
 .../org/apache/lucene/search/QueryRescorer.java    |  101 +-
 .../org/apache/lucene/search/QueryVisitor.java     |   35 +-
 .../lucene/search/QueueSizeBasedExecutor.java      |   10 +-
 .../org/apache/lucene/search/ReferenceManager.java |  215 +-
 .../java/org/apache/lucene/search/RegexpQuery.java |  117 +-
 .../apache/lucene/search/ReqExclBulkScorer.java    |    3 -
 .../org/apache/lucene/search/ReqExclScorer.java    |   33 +-
 .../org/apache/lucene/search/ReqOptSumScorer.java  |  269 +-
 .../java/org/apache/lucene/search/Rescorer.java    |   42 +-
 .../java/org/apache/lucene/search/Scorable.java    |   47 +-
 .../java/org/apache/lucene/search/ScoreAndDoc.java |    7 +-
 .../lucene/search/ScoreCachingWrappingScorer.java  |   17 +-
 .../java/org/apache/lucene/search/ScoreDoc.java    |   13 +-
 .../java/org/apache/lucene/search/ScoreMode.java   |   36 +-
 .../src/java/org/apache/lucene/search/Scorer.java  |   72 +-
 .../org/apache/lucene/search/ScorerSupplier.java   |   25 +-
 .../org/apache/lucene/search/ScoringRewrite.java   |  181 +-
 .../org/apache/lucene/search/SearcherFactory.java  |   35 +-
 .../lucene/search/SearcherLifetimeManager.java     |  168 +-
 .../org/apache/lucene/search/SearcherManager.java  |  153 +-
 .../org/apache/lucene/search/SegmentCacheable.java |   19 +-
 .../org/apache/lucene/search/SimpleCollector.java  |    3 -
 .../lucene/search/SimpleFieldComparator.java       |    5 +-
 .../org/apache/lucene/search/SliceExecutor.java    |   11 +-
 .../apache/lucene/search/SloppyPhraseMatcher.java  |  332 +-
 .../src/java/org/apache/lucene/search/Sort.java    |  146 +-
 .../java/org/apache/lucene/search/SortField.java   |  365 +-
 .../org/apache/lucene/search/SortRescorer.java     |   29 +-
 .../lucene/search/SortedNumericSelector.java       |   65 +-
 .../lucene/search/SortedNumericSortField.java      |  134 +-
 .../apache/lucene/search/SortedSetSelector.java    |   98 +-
 .../apache/lucene/search/SortedSetSortField.java   |   81 +-
 .../org/apache/lucene/search/SynonymQuery.java     |  161 +-
 .../lucene/search/TermCollectingRewrite.java       |   37 +-
 .../org/apache/lucene/search/TermInSetQuery.java   |   68 +-
 .../apache/lucene/search/TermMatchesIterator.java  |    9 +-
 .../java/org/apache/lucene/search/TermQuery.java   |   94 +-
 .../org/apache/lucene/search/TermRangeQuery.java   |  130 +-
 .../java/org/apache/lucene/search/TermScorer.java  |   19 +-
 .../org/apache/lucene/search/TermStatistics.java   |   92 +-
 .../lucene/search/TimeLimitingCollector.java       |  176 +-
 .../src/java/org/apache/lucene/search/TopDocs.java |  136 +-
 .../org/apache/lucene/search/TopDocsCollector.java |  123 +-
 .../apache/lucene/search/TopFieldCollector.java    |  214 +-
 .../org/apache/lucene/search/TopFieldDocs.java     |   24 +-
 .../apache/lucene/search/TopScoreDocCollector.java |  111 +-
 .../org/apache/lucene/search/TopTermsRewrite.java  |  244 +-
 .../lucene/search/TotalHitCountCollector.java      |    6 +-
 .../java/org/apache/lucene/search/TotalHits.java   |   32 +-
 .../org/apache/lucene/search/TwoPhaseIterator.java |   51 +-
 .../search/UsageTrackingQueryCachingPolicy.java    |   49 +-
 .../java/org/apache/lucene/search/WANDScorer.java  |  147 +-
 .../src/java/org/apache/lucene/search/Weight.java  |  177 +-
 .../org/apache/lucene/search/WildcardQuery.java    |   59 +-
 .../lucene/search/comparators/DocComparator.java   |   25 +-
 .../search/comparators/DoubleComparator.java       |   15 +-
 .../lucene/search/comparators/FloatComparator.java |   15 +-
 .../lucene/search/comparators/IntComparator.java   |   15 +-
 .../lucene/search/comparators/LongComparator.java  |   15 +-
 .../lucene/search/comparators/MinDocIterator.java  |    7 +-
 .../search/comparators/NumericComparator.java      |  171 +-
 .../lucene/search/comparators/package-info.java    |    5 +-
 .../org/apache/lucene/search/package-info.java     |  839 +++--
 .../lucene/search/similarities/AfterEffect.java    |   31 +-
 .../lucene/search/similarities/AfterEffectB.java   |   17 +-
 .../lucene/search/similarities/AfterEffectL.java   |    8 +-
 .../lucene/search/similarities/Axiomatic.java      |  160 +-
 .../lucene/search/similarities/AxiomaticF1EXP.java |   82 +-
 .../lucene/search/similarities/AxiomaticF1LOG.java |   76 +-
 .../lucene/search/similarities/AxiomaticF2EXP.java |   85 +-
 .../lucene/search/similarities/AxiomaticF2LOG.java |   80 +-
 .../lucene/search/similarities/AxiomaticF3EXP.java |   80 +-
 .../lucene/search/similarities/AxiomaticF3LOG.java |   73 +-
 .../lucene/search/similarities/BM25Similarity.java |  140 +-
 .../lucene/search/similarities/BasicModel.java     |   37 +-
 .../lucene/search/similarities/BasicModelG.java    |   34 +-
 .../lucene/search/similarities/BasicModelIF.java   |   23 +-
 .../lucene/search/similarities/BasicModelIn.java   |   18 +-
 .../lucene/search/similarities/BasicModelIne.java  |   27 +-
 .../lucene/search/similarities/BasicStats.java     |   32 +-
 .../search/similarities/BooleanSimilarity.java     |   18 +-
 .../search/similarities/ClassicSimilarity.java     |   23 +-
 .../lucene/search/similarities/DFISimilarity.java  |   85 +-
 .../lucene/search/similarities/DFRSimilarity.java  |  146 +-
 .../lucene/search/similarities/Distribution.java   |   30 +-
 .../lucene/search/similarities/DistributionLL.java |   10 +-
 .../search/similarities/DistributionSPL.java       |   19 +-
 .../lucene/search/similarities/IBSimilarity.java   |  149 +-
 .../lucene/search/similarities/Independence.java   |   21 +-
 .../similarities/IndependenceChiSquared.java       |   15 +-
 .../search/similarities/IndependenceSaturated.java |   13 +-
 .../similarities/IndependenceStandardized.java     |   16 +-
 .../search/similarities/LMDirichletSimilarity.java |   93 +-
 .../similarities/LMJelinekMercerSimilarity.java    |   70 +-
 .../lucene/search/similarities/LMSimilarity.java   |  101 +-
 .../apache/lucene/search/similarities/Lambda.java  |   21 +-
 .../lucene/search/similarities/LambdaDF.java       |   17 +-
 .../lucene/search/similarities/LambdaTTF.java      |   18 +-
 .../search/similarities/MultiSimilarity.java       |   25 +-
 .../lucene/search/similarities/Normalization.java  |   52 +-
 .../search/similarities/NormalizationH1.java       |   47 +-
 .../search/similarities/NormalizationH2.java       |   48 +-
 .../search/similarities/NormalizationH3.java       |   33 +-
 .../lucene/search/similarities/NormalizationZ.java |   28 +-
 .../similarities/PerFieldSimilarityWrapper.java    |   26 +-
 .../lucene/search/similarities/Similarity.java     |  205 +-
 .../lucene/search/similarities/SimilarityBase.java |  151 +-
 .../search/similarities/TFIDFSimilarity.java       |  485 ++-
 .../lucene/search/similarities/package-info.java   |  191 +-
 .../lucene/search/spans/ConjunctionSpans.java      |   24 +-
 .../apache/lucene/search/spans/ContainSpans.java   |   18 +-
 .../lucene/search/spans/FieldMaskingSpanQuery.java |   68 +-
 .../apache/lucene/search/spans/FilterSpans.java    |   57 +-
 .../lucene/search/spans/NearSpansOrdered.java      |   46 +-
 .../lucene/search/spans/NearSpansUnordered.java    |   40 +-
 .../apache/lucene/search/spans/SpanBoostQuery.java |   33 +-
 .../apache/lucene/search/spans/SpanCollector.java  |   16 +-
 .../lucene/search/spans/SpanContainQuery.java      |   35 +-
 .../lucene/search/spans/SpanContainingQuery.java   |   35 +-
 .../apache/lucene/search/spans/SpanFirstQuery.java |   27 +-
 .../search/spans/SpanMultiTermQueryWrapper.java    |  200 +-
 .../apache/lucene/search/spans/SpanNearQuery.java  |  131 +-
 .../apache/lucene/search/spans/SpanNotQuery.java   |  107 +-
 .../apache/lucene/search/spans/SpanOrQuery.java    |   59 +-
 .../search/spans/SpanPositionCheckQuery.java       |   63 +-
 .../lucene/search/spans/SpanPositionQueue.java     |    8 +-
 .../search/spans/SpanPositionRangeQuery.java       |   33 +-
 .../org/apache/lucene/search/spans/SpanQuery.java  |   11 +-
 .../org/apache/lucene/search/spans/SpanScorer.java |   39 +-
 .../apache/lucene/search/spans/SpanTermQuery.java  |   98 +-
 .../org/apache/lucene/search/spans/SpanWeight.java |  242 +-
 .../lucene/search/spans/SpanWithinQuery.java       |   36 +-
 .../java/org/apache/lucene/search/spans/Spans.java |   62 +-
 .../org/apache/lucene/search/spans/TermSpans.java  |   28 +-
 .../apache/lucene/search/spans/package-info.java   |   97 +-
 .../lucene/store/AlreadyClosedException.java       |    7 +-
 .../org/apache/lucene/store/BaseDirectory.java     |    9 +-
 .../org/apache/lucene/store/BufferedChecksum.java  |   18 +-
 .../lucene/store/BufferedChecksumIndexInput.java   |    9 +-
 .../apache/lucene/store/BufferedIndexInput.java    |  146 +-
 .../apache/lucene/store/ByteArrayDataInput.java    |   41 +-
 .../apache/lucene/store/ByteArrayDataOutput.java   |   11 +-
 .../org/apache/lucene/store/ByteBufferGuard.java   |   61 +-
 .../apache/lucene/store/ByteBufferIndexInput.java  |  162 +-
 .../apache/lucene/store/ByteBuffersDataInput.java  |   93 +-
 .../apache/lucene/store/ByteBuffersDataOutput.java |  232 +-
 .../apache/lucene/store/ByteBuffersDirectory.java  |  141 +-
 .../apache/lucene/store/ByteBuffersIndexInput.java |   27 +-
 .../lucene/store/ByteBuffersIndexOutput.java       |   25 +-
 .../apache/lucene/store/ChecksumIndexInput.java    |   25 +-
 .../java/org/apache/lucene/store/DataInput.java    |  153 +-
 .../java/org/apache/lucene/store/DataOutput.java   |  174 +-
 .../java/org/apache/lucene/store/Directory.java    |  106 +-
 .../java/org/apache/lucene/store/FSDirectory.java  |  260 +-
 .../org/apache/lucene/store/FSLockFactory.java     |   26 +-
 .../apache/lucene/store/FileSwitchDirectory.java   |   60 +-
 .../org/apache/lucene/store/FilterDirectory.java   |   35 +-
 .../java/org/apache/lucene/store/FlushInfo.java    |   45 +-
 .../java/org/apache/lucene/store/IOContext.java    |   78 +-
 .../java/org/apache/lucene/store/IndexInput.java   |  101 +-
 .../java/org/apache/lucene/store/IndexOutput.java  |   26 +-
 .../apache/lucene/store/InputStreamDataInput.java  |   13 +-
 .../src/java/org/apache/lucene/store/Lock.java     |   42 +-
 .../java/org/apache/lucene/store/LockFactory.java  |   35 +-
 .../lucene/store/LockObtainFailedException.java    |    9 +-
 .../lucene/store/LockReleaseFailedException.java   |    6 +-
 .../org/apache/lucene/store/LockStressTest.java    |   78 +-
 .../store/LockValidatingDirectoryWrapper.java      |   10 +-
 .../org/apache/lucene/store/LockVerifyServer.java  |  138 +-
 .../org/apache/lucene/store/MMapDirectory.java     |  355 +-
 .../java/org/apache/lucene/store/MergeInfo.java    |   64 +-
 .../org/apache/lucene/store/NIOFSDirectory.java    |  127 +-
 .../apache/lucene/store/NRTCachingDirectory.java   |  107 +-
 .../apache/lucene/store/NativeFSLockFactory.java   |   99 +-
 .../org/apache/lucene/store/NoLockFactory.java     |   18 +-
 .../lucene/store/OutputStreamDataOutput.java       |   11 +-
 .../lucene/store/OutputStreamIndexOutput.java      |   13 +-
 .../org/apache/lucene/store/RandomAccessInput.java |   22 +-
 .../lucene/store/RateLimitedIndexOutput.java       |   20 +-
 .../java/org/apache/lucene/store/RateLimiter.java  |   78 +-
 .../apache/lucene/store/SimpleFSLockFactory.java   |   75 +-
 .../lucene/store/SingleInstanceLockFactory.java    |   13 +-
 .../apache/lucene/store/SleepingLockWrapper.java   |   60 +-
 .../lucene/store/TrackingDirectoryWrapper.java     |    7 +-
 .../apache/lucene/store/VerifyingLockFactory.java  |   17 +-
 .../java/org/apache/lucene/store/package-info.java |    4 +-
 .../java/org/apache/lucene/util/Accountable.java   |   14 +-
 .../java/org/apache/lucene/util/Accountables.java  |  115 +-
 .../lucene/util/ArrayInPlaceMergeSorter.java       |    3 +-
 .../org/apache/lucene/util/ArrayIntroSorter.java   |    3 +-
 .../org/apache/lucene/util/ArrayTimSorter.java     |    3 +-
 .../src/java/org/apache/lucene/util/ArrayUtil.java |  400 ++-
 .../src/java/org/apache/lucene/util/Attribute.java |    8 +-
 .../org/apache/lucene/util/AttributeFactory.java   |  122 +-
 .../java/org/apache/lucene/util/AttributeImpl.java |   89 +-
 .../org/apache/lucene/util/AttributeReflector.java |   11 +-
 .../org/apache/lucene/util/AttributeSource.java    |  354 +-
 .../java/org/apache/lucene/util/BitDocIdSet.java   |   14 +-
 .../src/java/org/apache/lucene/util/BitSet.java    |   45 +-
 .../org/apache/lucene/util/BitSetIterator.java     |   13 +-
 .../src/java/org/apache/lucene/util/BitUtil.java   |  107 +-
 .../core/src/java/org/apache/lucene/util/Bits.java |   29 +-
 .../java/org/apache/lucene/util/ByteBlockPool.java |  251 +-
 .../src/java/org/apache/lucene/util/BytesRef.java  |  137 +-
 .../java/org/apache/lucene/util/BytesRefArray.java |  106 +-
 .../org/apache/lucene/util/BytesRefBuilder.java    |   62 +-
 .../org/apache/lucene/util/BytesRefComparator.java |   27 +-
 .../java/org/apache/lucene/util/BytesRefHash.java  |  245 +-
 .../org/apache/lucene/util/BytesRefIterator.java   |   22 +-
 .../src/java/org/apache/lucene/util/CharsRef.java  |  115 +-
 .../org/apache/lucene/util/CharsRefBuilder.java    |   35 +-
 .../org/apache/lucene/util/ClassLoaderUtils.java   |   13 +-
 .../lucene/util/ClasspathResourceLoader.java       |   44 +-
 .../apache/lucene/util/CloseableThreadLocal.java   |   56 +-
 .../org/apache/lucene/util/CollectionUtil.java     |   54 +-
 .../org/apache/lucene/util/CommandLineUtil.java    |   75 +-
 .../src/java/org/apache/lucene/util/Constants.java |   45 +-
 .../src/java/org/apache/lucene/util/Counter.java   |   29 +-
 .../org/apache/lucene/util/DocIdSetBuilder.java    |   59 +-
 .../org/apache/lucene/util/FilterIterator.java     |   20 +-
 .../java/org/apache/lucene/util/FixedBitSet.java   |  205 +-
 .../src/java/org/apache/lucene/util/FixedBits.java |    9 +-
 .../lucene/util/FixedLengthBytesRefArray.java      |   65 +-
 .../lucene/util/FrequencyTrackingRingBuffer.java   |   57 +-
 .../java/org/apache/lucene/util/IOSupplier.java    |    3 +-
 .../src/java/org/apache/lucene/util/IOUtils.java   |  356 +-
 .../org/apache/lucene/util/InPlaceMergeSorter.java |   12 +-
 .../java/org/apache/lucene/util/InfoStream.java    |   54 +-
 .../org/apache/lucene/util/IntArrayDocIdSet.java   |   11 +-
 .../java/org/apache/lucene/util/IntBlockPool.java  |  218 +-
 .../java/org/apache/lucene/util/IntroSelector.java |   53 +-
 .../java/org/apache/lucene/util/IntroSorter.java   |   13 +-
 .../src/java/org/apache/lucene/util/IntsRef.java   |   89 +-
 .../org/apache/lucene/util/IntsRefBuilder.java     |   25 +-
 .../org/apache/lucene/util/LSBRadixSorter.java     |   12 +-
 .../java/org/apache/lucene/util/LongBitSet.java    |  197 +-
 .../src/java/org/apache/lucene/util/LongHeap.java  |   73 +-
 .../java/org/apache/lucene/util/LongValues.java    |   38 +-
 .../src/java/org/apache/lucene/util/LongsRef.java  |   92 +-
 .../org/apache/lucene/util/MSBRadixSorter.java     |   62 +-
 .../src/java/org/apache/lucene/util/MapOfSets.java |   30 +-
 .../src/java/org/apache/lucene/util/MathUtil.java  |   83 +-
 .../org/apache/lucene/util/MergedIterator.java     |   60 +-
 .../org/apache/lucene/util/NamedSPILoader.java     |   75 +-
 .../org/apache/lucene/util/NamedThreadFactory.java |   33 +-
 .../java/org/apache/lucene/util/NotDocIdSet.java   |   13 +-
 .../java/org/apache/lucene/util/NumericUtils.java  |  157 +-
 .../java/org/apache/lucene/util/OfflineSorter.java |  306 +-
 .../java/org/apache/lucene/util/PagedBytes.java    |  127 +-
 .../apache/lucene/util/PrintStreamInfoStream.java  |   29 +-
 .../java/org/apache/lucene/util/PriorityQueue.java |  125 +-
 .../java/org/apache/lucene/util/QueryBuilder.java  |  347 +-
 .../java/org/apache/lucene/util/RadixSelector.java |   63 +-
 .../org/apache/lucene/util/RamUsageEstimator.java  |  297 +-
 .../lucene/util/RecyclingByteBlockAllocator.java   |   68 +-
 .../lucene/util/RecyclingIntBlockAllocator.java    |   70 +-
 .../src/java/org/apache/lucene/util/RefCount.java  |   37 +-
 .../org/apache/lucene/util/ResourceLoader.java     |   24 +-
 .../apache/lucene/util/ResourceLoaderAware.java    |   10 +-
 .../org/apache/lucene/util/RoaringDocIdSet.java    |   41 +-
 .../java/org/apache/lucene/util/RollingBuffer.java |   63 +-
 .../lucene/util/SameThreadExecutorService.java     |    6 +-
 .../src/java/org/apache/lucene/util/Selector.java  |   17 +-
 .../org/apache/lucene/util/SentinelIntSet.java     |   74 +-
 .../src/java/org/apache/lucene/util/SetOnce.java   |   29 +-
 .../java/org/apache/lucene/util/SloppyMath.java    |  226 +-
 .../java/org/apache/lucene/util/SmallFloat.java    |   76 +-
 .../src/java/org/apache/lucene/util/Sorter.java    |   55 +-
 .../org/apache/lucene/util/SparseFixedBitSet.java  |   58 +-
 .../apache/lucene/util/StrictStringTokenizer.java  |   13 +-
 .../java/org/apache/lucene/util/StringHelper.java  |  171 +-
 .../apache/lucene/util/StringMSBRadixSorter.java   |    3 +-
 .../org/apache/lucene/util/SuppressForbidden.java  |    6 +-
 .../lucene/util/ThreadInterruptedException.java    |    6 +-
 .../src/java/org/apache/lucene/util/TimSorter.java |   67 +-
 .../java/org/apache/lucene/util/ToStringUtils.java |   11 +-
 .../java/org/apache/lucene/util/UnicodeUtil.java   |  419 +--
 .../java/org/apache/lucene/util/VectorUtil.java    |   27 +-
 .../src/java/org/apache/lucene/util/Version.java   |  196 +-
 .../java/org/apache/lucene/util/VirtualMethod.java |  119 +-
 .../org/apache/lucene/util/WeakIdentityMap.java    |  164 +-
 .../org/apache/lucene/util/automaton/Automata.java |  215 +-
 .../apache/lucene/util/automaton/Automaton.java    |  709 ++--
 .../lucene/util/automaton/AutomatonProvider.java   |   15 +-
 .../lucene/util/automaton/ByteRunAutomaton.java    |   10 +-
 .../util/automaton/CharacterRunAutomaton.java      |   30 +-
 .../lucene/util/automaton/CompiledAutomaton.java   |  239 +-
 .../automaton/DaciukMihovAutomatonBuilder.java     |  224 +-
 .../util/automaton/FiniteStringsIterator.java      |   73 +-
 .../org/apache/lucene/util/automaton/IntSet.java   |   46 +-
 .../util/automaton/Lev1ParametricDescription.java  |   75 +-
 .../util/automaton/Lev1TParametricDescription.java |   82 +-
 .../util/automaton/Lev2ParametricDescription.java  |  225 +-
 .../util/automaton/Lev2TParametricDescription.java |  296 +-
 .../lucene/util/automaton/LevenshteinAutomata.java |  252 +-
 .../automaton/LimitedFiniteStringsIterator.java    |   26 +-
 .../util/automaton/MinimizationOperations.java     |  111 +-
 .../apache/lucene/util/automaton/Operations.java   |  507 +--
 .../org/apache/lucene/util/automaton/RegExp.java   |  629 ++--
 .../apache/lucene/util/automaton/RunAutomaton.java |   84 +-
 .../apache/lucene/util/automaton/SortedIntSet.java |   56 +-
 .../apache/lucene/util/automaton/StatePair.java    |   28 +-
 .../TooComplexToDeterminizeException.java          |   33 +-
 .../apache/lucene/util/automaton/Transition.java   |   20 +-
 .../apache/lucene/util/automaton/UTF32ToUTF8.java  |  132 +-
 .../apache/lucene/util/automaton/package-info.java |   28 +-
 .../java/org/apache/lucene/util/bkd/BKDConfig.java |   36 +-
 .../apache/lucene/util/bkd/BKDRadixSelector.java   |  331 +-
 .../java/org/apache/lucene/util/bkd/BKDReader.java |  465 ++-
 .../java/org/apache/lucene/util/bkd/BKDWriter.java | 1461 +++++---
 .../org/apache/lucene/util/bkd/DocIdsWriter.java   |   31 +-
 .../apache/lucene/util/bkd/HeapPointReader.java    |   23 +-
 .../apache/lucene/util/bkd/HeapPointWriter.java    |   53 +-
 .../lucene/util/bkd/MutablePointsReaderUtils.java  |  108 +-
 .../apache/lucene/util/bkd/OfflinePointReader.java |   59 +-
 .../apache/lucene/util/bkd/OfflinePointWriter.java |   43 +-
 .../org/apache/lucene/util/bkd/PointReader.java    |   11 +-
 .../org/apache/lucene/util/bkd/PointValue.java     |    9 +-
 .../org/apache/lucene/util/bkd/PointWriter.java    |   13 +-
 .../org/apache/lucene/util/bkd/package-info.java   |    5 +-
 .../java/org/apache/lucene/util/compress/LZ4.java  |  112 +-
 .../util/compress/LowercaseAsciiCompression.java   |   36 +-
 .../apache/lucene/util/compress/package-info.java  |    6 +-
 .../org/apache/lucene/util/fst/BitTableUtil.java   |   76 +-
 .../lucene/util/fst/ByteSequenceOutputs.java       |   22 +-
 .../apache/lucene/util/fst/BytesRefFSTEnum.java    |   37 +-
 .../org/apache/lucene/util/fst/BytesStore.java     |  112 +-
 .../lucene/util/fst/CharSequenceOutputs.java       |   32 +-
 .../src/java/org/apache/lucene/util/fst/FST.java   |  502 +--
 .../org/apache/lucene/util/fst/FSTCompiler.java    |  330 +-
 .../java/org/apache/lucene/util/fst/FSTEnum.java   |  214 +-
 .../java/org/apache/lucene/util/fst/FSTStore.java  |   14 +-
 .../apache/lucene/util/fst/ForwardBytesReader.java |    1 -
 .../apache/lucene/util/fst/IntSequenceOutputs.java |   34 +-
 .../org/apache/lucene/util/fst/IntsRefFSTEnum.java |   39 +-
 .../java/org/apache/lucene/util/fst/NoOutputs.java |   43 +-
 .../java/org/apache/lucene/util/fst/NodeHash.java  |   64 +-
 .../apache/lucene/util/fst/OffHeapFSTStore.java    |   77 +-
 .../org/apache/lucene/util/fst/OnHeapFSTStore.java |  115 +-
 .../java/org/apache/lucene/util/fst/Outputs.java   |   55 +-
 .../org/apache/lucene/util/fst/PairOutputs.java    |   64 +-
 .../apache/lucene/util/fst/PositiveIntOutputs.java |   16 +-
 .../apache/lucene/util/fst/ReverseBytesReader.java |    6 +-
 .../lucene/util/fst/ReverseRandomAccessReader.java |   76 +-
 .../src/java/org/apache/lucene/util/fst/Util.java  |  430 ++-
 .../org/apache/lucene/util/fst/package-info.java   |   41 +-
 .../util/graph/GraphTokenStreamFiniteStrings.java  |   63 +-
 .../org/apache/lucene/util/graph/package-info.java |    4 +-
 .../org/apache/lucene/util/hnsw/BoundsChecker.java |   84 +-
 .../org/apache/lucene/util/hnsw/HnswGraph.java     |   60 +-
 .../apache/lucene/util/hnsw/HnswGraphBuilder.java  |   97 +-
 .../org/apache/lucene/util/hnsw/NeighborArray.java |   11 +-
 .../org/apache/lucene/util/hnsw/NeighborQueue.java |   35 +-
 .../org/apache/lucene/util/hnsw/package-info.java  |    4 +-
 .../apache/lucene/util/mutable/MutableValue.java   |   10 +-
 .../lucene/util/mutable/MutableValueBool.java      |   12 +-
 .../lucene/util/mutable/MutableValueDate.java      |    3 +-
 .../lucene/util/mutable/MutableValueDouble.java    |   13 +-
 .../lucene/util/mutable/MutableValueFloat.java     |   11 +-
 .../lucene/util/mutable/MutableValueInt.java       |   20 +-
 .../lucene/util/mutable/MutableValueLong.java      |   18 +-
 .../lucene/util/mutable/MutableValueStr.java       |   12 +-
 .../apache/lucene/util/mutable/package-info.java   |    6 +-
 .../java/org/apache/lucene/util/package-info.java  |    4 +-
 .../util/packed/AbstractBlockPackedWriter.java     |   19 +-
 .../lucene/util/packed/AbstractPagedMutable.java   |   14 +-
 .../lucene/util/packed/BlockPackedReader.java      |   29 +-
 .../util/packed/BlockPackedReaderIterator.java     |   31 +-
 .../lucene/util/packed/BlockPackedWriter.java      |   56 +-
 .../apache/lucene/util/packed/BulkOperation.java   |  243 +-
 .../lucene/util/packed/BulkOperationPacked.java    |   51 +-
 .../lucene/util/packed/BulkOperationPacked1.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked10.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked11.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked12.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked13.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked14.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked15.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked16.java  |   23 +-
 .../lucene/util/packed/BulkOperationPacked17.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked18.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked19.java  |   23 +-
 .../lucene/util/packed/BulkOperationPacked2.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked20.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked21.java  |   29 +-
 .../lucene/util/packed/BulkOperationPacked22.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked23.java  |   35 +-
 .../lucene/util/packed/BulkOperationPacked24.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked3.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked4.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked5.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked6.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked7.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked8.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked9.java   |   17 +-
 .../packed/BulkOperationPackedSingleBlock.java     |   43 +-
 .../lucene/util/packed/DeltaPackedLongValues.java  |   18 +-
 .../lucene/util/packed/DirectMonotonicReader.java  |   68 +-
 .../lucene/util/packed/DirectMonotonicWriter.java  |   58 +-
 .../packed/DirectPacked64SingleBlockReader.java    |    5 +-
 .../lucene/util/packed/DirectPackedReader.java     |   21 +-
 .../apache/lucene/util/packed/DirectReader.java    |  157 +-
 .../apache/lucene/util/packed/DirectWriter.java    |   69 +-
 .../apache/lucene/util/packed/GrowableWriter.java  |   31 +-
 .../util/packed/MonotonicBlockPackedReader.java    |   40 +-
 .../util/packed/MonotonicBlockPackedWriter.java    |   47 +-
 .../lucene/util/packed/MonotonicLongValues.java    |   32 +-
 .../org/apache/lucene/util/packed/Packed64.java    |  112 +-
 .../lucene/util/packed/Packed64SingleBlock.java    |   56 +-
 .../apache/lucene/util/packed/PackedDataInput.java |   21 +-
 .../lucene/util/packed/PackedDataOutput.java       |   24 +-
 .../org/apache/lucene/util/packed/PackedInts.java  |  661 ++--
 .../lucene/util/packed/PackedLongValues.java       |   57 +-
 .../lucene/util/packed/PackedReaderIterator.java   |   13 +-
 .../apache/lucene/util/packed/PackedWriter.java    |    7 +-
 .../lucene/util/packed/PagedGrowableWriter.java    |   26 +-
 .../apache/lucene/util/packed/PagedMutable.java    |   14 +-
 .../apache/lucene/util/packed/gen_BulkOperation.py |    2 +
 .../lucene/util/packed/gen_Packed64SingleBlock.py  |    3 +
 .../apache/lucene/util/packed/package-info.java    |  143 +-
 .../src/test/org/apache/lucene/TestAssertions.java |   17 +-
 .../core/src/test/org/apache/lucene/TestDemo.java  |   10 +-
 .../test/org/apache/lucene/TestExternalCodecs.java |   50 +-
 .../apache/lucene/TestMergeSchedulerExternal.java  |   29 +-
 .../src/test/org/apache/lucene/TestSearch.java     |  141 +-
 .../org/apache/lucene/TestSearchForDuplicates.java |  163 +-
 .../lucene/analysis/FakeCharFilterFactory.java     |    4 +-
 .../lucene/analysis/FakeTokenFilterFactory.java    |    4 +-
 .../analysis/TestAbstractAnalysisFactory.java      |    2 +-
 .../lucene/analysis/TestAnalysisSPILoader.java     |  188 +-
 .../lucene/analysis/TestAnalyzerWrapper.java       |   49 +-
 .../lucene/analysis/TestCachingTokenFilter.java    |   93 +-
 .../apache/lucene/analysis/TestCharArrayMap.java   |  112 +-
 .../apache/lucene/analysis/TestCharArraySet.java   |  239 +-
 .../org/apache/lucene/analysis/TestCharFilter.java |    8 +-
 .../apache/lucene/analysis/TestCharacterUtils.java |   40 +-
 .../analysis/TestDelegatingAnalyzerWrapper.java    |   85 +-
 .../lucene/analysis/TestGraphTokenFilter.java      |   60 +-
 .../lucene/analysis/TestGraphTokenizers.java       |  415 +--
 .../lucene/analysis/TestReusableStringReader.java  |    8 +-
 .../org/apache/lucene/analysis/TestStopFilter.java |  104 +-
 .../test/org/apache/lucene/analysis/TestToken.java |  114 +-
 .../apache/lucene/analysis/TestWordlistLoader.java |   39 +-
 .../lucene/codecs/TestCodecLoadingDeadlock.java    |  167 +-
 .../org/apache/lucene/codecs/TestCodecUtil.java    |  275 +-
 .../codecs/TestCompetitiveFreqNormAccumulator.java |   11 +-
 .../compressing/AbstractTestCompressionMode.java   |   40 +-
 .../compressing/TestFastCompressionMode.java       |    1 -
 .../compressing/TestFastDecompressionMode.java     |    1 -
 .../compressing/TestHighCompressionMode.java       |    2 -
 .../lucene50/TestLucene50CompoundFormat.java       |    3 +-
 .../lucene50/TestLucene50LiveDocsFormat.java       |    1 -
 .../lucene50/TestLucene50TermVectorsFormat.java    |    1 -
 .../lucene50/TestLucene60FieldInfoFormat.java      |    5 +-
 .../BaseLucene80DocValuesFormatTestCase.java       |  176 +-
 ...TestBestCompressionLucene80DocValuesFormat.java |   12 +-
 .../TestBestSpeedLucene80DocValuesFormat.java      |    9 +-
 .../lucene/codecs/lucene80/TestIndexedDISI.java    |  236 +-
 .../codecs/lucene80/TestLucene80NormsFormat.java   |    7 +-
 .../TestLucene80NormsFormatMergeInstance.java      |    5 +-
 .../lucene/codecs/lucene84/TestForDeltaUtil.java   |   16 +-
 .../apache/lucene/codecs/lucene84/TestForUtil.java |   15 +-
 .../lucene84/TestLucene84PostingsFormat.java       |   23 +-
 .../lucene/codecs/lucene84/TestPForUtil.java       |   15 +-
 .../codecs/lucene86/TestLucene86PointsFormat.java  |  240 +-
 .../lucene86/TestLucene86SegmentInfoFormat.java    |    3 +-
 ...tLucene87StoredFieldsFormatHighCompression.java |   31 +-
 ...estLucene87StoredFieldsFormatMergeInstance.java |    5 +-
 .../perfield/TestPerFieldDocValuesFormat.java      |  104 +-
 .../perfield/TestPerFieldPostingsFormat.java       |    6 +-
 .../perfield/TestPerFieldPostingsFormat2.java      |  154 +-
 .../lucene/document/BaseLatLonShapeTestCase.java   |  144 +-
 .../lucene/document/BaseShapeEncodingTestCase.java |  169 +-
 .../apache/lucene/document/BaseShapeTestCase.java  |  275 +-
 .../lucene/document/BaseXYShapeTestCase.java       |   47 +-
 .../apache/lucene/document/TestBinaryDocument.java |   37 +-
 .../org/apache/lucene/document/TestDateTools.java  |  104 +-
 .../org/apache/lucene/document/TestDocument.java   |  164 +-
 .../apache/lucene/document/TestDoubleRange.java    |    6 +-
 .../lucene/document/TestFeatureDoubleValues.java   |  109 +-
 .../apache/lucene/document/TestFeatureField.java   |   77 +-
 .../apache/lucene/document/TestFeatureSort.java    |   48 +-
 .../test/org/apache/lucene/document/TestField.java |  251 +-
 .../org/apache/lucene/document/TestFieldType.java  |   40 +-
 .../org/apache/lucene/document/TestFloatRange.java |    8 +-
 .../lucene/document/TestInetAddressPoint.java      |  177 +-
 .../org/apache/lucene/document/TestIntRange.java   |    2 +-
 .../lucene/document/TestLatLonDocValuesField.java  |   10 +-
 .../document/TestLatLonLineShapeQueries.java       |   13 +-
 .../document/TestLatLonMultiLineShapeQueries.java  |   11 +-
 .../document/TestLatLonMultiPointShapeQueries.java |   11 +-
 .../TestLatLonMultiPolygonShapeQueries.java        |   16 +-
 .../apache/lucene/document/TestLatLonPoint.java    |   21 +-
 .../TestLatLonPointDistanceFeatureQuery.java       |  226 +-
 .../document/TestLatLonPointDistanceSort.java      |   87 +-
 .../document/TestLatLonPointShapeQueries.java      |   21 +-
 .../document/TestLatLonPolygonShapeQueries.java    |    5 +-
 .../apache/lucene/document/TestLatLonShape.java    |  469 +--
 .../lucene/document/TestLatLonShapeEncoding.java   |    2 +-
 .../document/TestLongDistanceFeatureQuery.java     |  117 +-
 .../org/apache/lucene/document/TestLongRange.java  |    2 +-
 .../lucene/document/TestXYLineShapeQueries.java    |   19 +-
 .../document/TestXYMultiLineShapeQueries.java      |   14 +-
 .../document/TestXYMultiPointShapeQueries.java     |    9 +-
 .../document/TestXYMultiPolygonShapeQueries.java   |   15 +-
 .../lucene/document/TestXYPointShapeQueries.java   |   23 +-
 .../lucene/document/TestXYPolygonShapeQueries.java |   13 +-
 .../org/apache/lucene/document/TestXYShape.java    |   54 +-
 .../lucene/document/TestXYShapeEncoding.java       |    2 +-
 .../src/test/org/apache/lucene/geo/TestCircle.java |   48 +-
 .../test/org/apache/lucene/geo/TestCircle2D.java   |   46 +-
 .../apache/lucene/geo/TestGeoEncodingUtils.java    |   75 +-
 .../test/org/apache/lucene/geo/TestGeoUtils.java   |  111 +-
 .../src/test/org/apache/lucene/geo/TestLine2D.java |   36 +-
 .../src/test/org/apache/lucene/geo/TestPoint.java  |   26 +-
 .../test/org/apache/lucene/geo/TestPoint2D.java    |   74 +-
 .../test/org/apache/lucene/geo/TestPolygon.java    |  142 +-
 .../test/org/apache/lucene/geo/TestPolygon2D.java  |  236 +-
 .../org/apache/lucene/geo/TestRectangle2D.java     |   51 +-
 .../lucene/geo/TestSimpleWKTShapeParsing.java      |   83 +-
 .../org/apache/lucene/geo/TestTessellator.java     |  857 +++--
 .../test/org/apache/lucene/geo/TestXYCircle.java   |   86 +-
 .../src/test/org/apache/lucene/geo/TestXYLine.java |   77 +-
 .../test/org/apache/lucene/geo/TestXYPoint.java    |   62 +-
 .../test/org/apache/lucene/geo/TestXYPolygon.java  |   88 +-
 .../org/apache/lucene/geo/TestXYRectangle.java     |  125 +-
 .../apache/lucene/index/Test2BBinaryDocValues.java |   87 +-
 .../test/org/apache/lucene/index/Test2BDocs.java   |   59 +-
 .../lucene/index/Test2BNumericDocValues.java       |   42 +-
 .../test/org/apache/lucene/index/Test2BPoints.java |   69 +-
 .../org/apache/lucene/index/Test2BPositions.java   |   41 +-
 .../org/apache/lucene/index/Test2BPostings.java    |   35 +-
 .../apache/lucene/index/Test2BPostingsBytes.java   |   72 +-
 .../index/Test2BSortedDocValuesFixedSorted.java    |   48 +-
 .../lucene/index/Test2BSortedDocValuesOrds.java    |   50 +-
 .../test/org/apache/lucene/index/Test2BTerms.java  |   74 +-
 .../apache/lucene/index/Test4GBStoredFields.java   |   30 +-
 .../org/apache/lucene/index/TestAddIndexes.java    |  670 ++--
 .../lucene/index/TestAllFilesCheckIndexHeader.java |   58 +-
 .../lucene/index/TestAllFilesDetectBitFlips.java   |   44 +-
 .../lucene/index/TestAllFilesDetectTruncation.java |   52 +-
 .../index/TestAllFilesHaveChecksumFooter.java      |   21 +-
 .../lucene/index/TestAllFilesHaveCodecHeader.java  |   35 +-
 .../org/apache/lucene/index/TestAtomicUpdate.java  |   24 +-
 .../apache/lucene/index/TestBagOfPositions.java    |   69 +-
 .../org/apache/lucene/index/TestBagOfPostings.java |   82 +-
 .../lucene/index/TestBinaryDocValuesUpdates.java   |  587 +--
 .../org/apache/lucene/index/TestBinaryTerms.java   |   22 +-
 .../apache/lucene/index/TestBufferedUpdates.java   |    4 +-
 .../org/apache/lucene/index/TestByteSlices.java    |   31 +-
 .../org/apache/lucene/index/TestCheckIndex.java    |    8 +-
 .../lucene/index/TestCodecHoldsOpenFiles.java      |    5 +-
 .../test/org/apache/lucene/index/TestCodecs.java   |  329 +-
 .../lucene/index/TestConcurrentMergeScheduler.java |  442 +--
 .../lucene/index/TestConsistentFieldNumbers.java   |  145 +-
 .../test/org/apache/lucene/index/TestCrash.java    |   25 +-
 .../lucene/index/TestCrashCausesCorruptIndex.java  |   86 +-
 .../org/apache/lucene/index/TestCustomNorms.java   |   15 +-
 .../apache/lucene/index/TestCustomTermFreq.java    |  265 +-
 .../apache/lucene/index/TestDeletionPolicy.java    |  301 +-
 .../lucene/index/TestDemoParallelLeafReader.java   |  650 ++--
 .../apache/lucene/index/TestDirectoryReader.java   |  527 +--
 .../lucene/index/TestDirectoryReaderReopen.java    |  702 ++--
 .../src/test/org/apache/lucene/index/TestDoc.java  |  127 +-
 .../test/org/apache/lucene/index/TestDocCount.java |   16 +-
 .../org/apache/lucene/index/TestDocIDMerger.java   |   59 +-
 .../index/TestDocInverterPerFieldErrorInfo.java    |   36 +-
 .../org/apache/lucene/index/TestDocValues.java     |  259 +-
 .../lucene/index/TestDocValuesFieldUpdates.java    |   16 +-
 .../apache/lucene/index/TestDocValuesIndexing.java |  300 +-
 .../apache/lucene/index/TestDocsAndPositions.java  |  148 +-
 .../apache/lucene/index/TestDocsWithFieldSet.java  |    2 -
 .../apache/lucene/index/TestDocumentWriter.java    |  190 +-
 .../index/TestDocumentsWriterDeleteQueue.java      |   55 +-
 .../index/TestDocumentsWriterPerThreadPool.java    |   53 +-
 .../index/TestDocumentsWriterStallControl.java     |  188 +-
 .../org/apache/lucene/index/TestDuelingCodecs.java |   46 +-
 .../lucene/index/TestDuelingCodecsAtNight.java     |    7 +-
 .../lucene/index/TestExceedMaxTermLength.java      |   70 +-
 .../lucene/index/TestExitableDirectoryReader.java  |  153 +-
 .../org/apache/lucene/index/TestFieldInfos.java    |   92 +-
 .../apache/lucene/index/TestFieldInvertState.java  |   40 +-
 .../org/apache/lucene/index/TestFieldReuse.java    |   42 +-
 .../lucene/index/TestFieldUpdatesBuffer.java       |   70 +-
 .../org/apache/lucene/index/TestFieldsReader.java  |   33 +-
 .../apache/lucene/index/TestFilterCodecReader.java |   21 +-
 .../lucene/index/TestFilterDirectoryReader.java    |   13 +-
 .../apache/lucene/index/TestFilterLeafReader.java  |   63 +-
 .../apache/lucene/index/TestFilterMergePolicy.java |    3 +-
 .../src/test/org/apache/lucene/index/TestFlex.java |   26 +-
 .../lucene/index/TestFlushByRamOrCountsPolicy.java |   89 +-
 .../apache/lucene/index/TestForTooMuchCloning.java |   42 +-
 .../apache/lucene/index/TestForceMergeForever.java |   35 +-
 .../lucene/index/TestFrozenBufferedUpdates.java    |   13 +-
 .../org/apache/lucene/index/TestIndexCommit.java   |  106 +-
 .../apache/lucene/index/TestIndexFileDeleter.java  |  282 +-
 .../org/apache/lucene/index/TestIndexInput.java    |  265 +-
 .../lucene/index/TestIndexManyDocuments.java       |   38 +-
 .../org/apache/lucene/index/TestIndexOptions.java  |   53 +-
 .../apache/lucene/index/TestIndexReaderClose.java  |   92 +-
 .../org/apache/lucene/index/TestIndexSorting.java  |  303 +-
 .../apache/lucene/index/TestIndexTooManyDocs.java  |  105 +-
 .../org/apache/lucene/index/TestIndexWriter.java   | 2219 +++++++-----
 .../apache/lucene/index/TestIndexWriterCommit.java |  412 ++-
 .../apache/lucene/index/TestIndexWriterConfig.java |  145 +-
 .../apache/lucene/index/TestIndexWriterDelete.java |  580 +--
 .../lucene/index/TestIndexWriterDeleteByQuery.java |    1 -
 .../lucene/index/TestIndexWriterExceptions.java    | 1432 ++++----
 .../lucene/index/TestIndexWriterExceptions2.java   |  103 +-
 .../lucene/index/TestIndexWriterForceMerge.java    |  152 +-
 .../lucene/index/TestIndexWriterFromReader.java    |  271 +-
 .../lucene/index/TestIndexWriterLockRelease.java   |   15 +-
 .../lucene/index/TestIndexWriterMaxDocs.java       |  306 +-
 .../lucene/index/TestIndexWriterMergePolicy.java   |  539 +--
 .../lucene/index/TestIndexWriterMerging.java       |  266 +-
 .../lucene/index/TestIndexWriterNRTIsCurrent.java  |   36 +-
 .../lucene/index/TestIndexWriterOnDiskFull.java    |  303 +-
 .../lucene/index/TestIndexWriterOnJRECrash.java    |  153 +-
 .../lucene/index/TestIndexWriterOnVMError.java     |  175 +-
 .../index/TestIndexWriterOutOfFileDescriptors.java |   22 +-
 .../apache/lucene/index/TestIndexWriterReader.java |  587 +--
 .../index/TestIndexWriterThreadsToSegments.java    |  224 +-
 .../lucene/index/TestIndexWriterUnicode.java       |  158 +-
 .../lucene/index/TestIndexWriterWithThreads.java   |  401 ++-
 .../apache/lucene/index/TestIndexableField.java    |  236 +-
 .../lucene/index/TestIndexingSequenceNumbers.java  |  463 ++-
 .../org/apache/lucene/index/TestInfoStream.java    |   75 +-
 .../org/apache/lucene/index/TestIntBlockPool.java  |   50 +-
 .../org/apache/lucene/index/TestIsCurrent.java     |   19 +-
 .../test/org/apache/lucene/index/TestKnnGraph.java |  122 +-
 .../apache/lucene/index/TestLazyProxSkipping.java  |  351 +-
 .../apache/lucene/index/TestLogMergePolicy.java    |    1 -
 .../org/apache/lucene/index/TestLongPostings.java  |   94 +-
 .../org/apache/lucene/index/TestManyFields.java    |   74 +-
 .../org/apache/lucene/index/TestMaxPosition.java   |   19 +-
 .../apache/lucene/index/TestMaxTermFrequency.java  |   30 +-
 .../org/apache/lucene/index/TestMergePolicy.java   |  107 +-
 .../apache/lucene/index/TestMergeRateLimiter.java  |    1 -
 .../org/apache/lucene/index/TestMixedCodecs.java   |    6 +-
 .../lucene/index/TestMixedDocValuesUpdates.java    |  385 +-
 .../apache/lucene/index/TestMultiDocValues.java    |  132 +-
 .../org/apache/lucene/index/TestMultiFields.java   |   55 +-
 .../lucene/index/TestMultiLevelSkipList.java       |   57 +-
 .../apache/lucene/index/TestMultiTermsEnum.java    |   19 +-
 .../apache/lucene/index/TestNRTReaderCleanup.java  |    6 +-
 .../lucene/index/TestNRTReaderWithThreads.java     |   29 +-
 .../org/apache/lucene/index/TestNRTThreads.java    |   23 +-
 .../org/apache/lucene/index/TestNeverDelete.java   |   55 +-
 .../org/apache/lucene/index/TestNewestSegment.java |    4 +-
 .../apache/lucene/index/TestNoDeletionPolicy.java  |   15 +-
 .../org/apache/lucene/index/TestNoMergePolicy.java |    7 +-
 .../apache/lucene/index/TestNoMergeScheduler.java  |   10 +-
 .../test/org/apache/lucene/index/TestNorms.java    |   22 +-
 .../lucene/index/TestNumericDocValuesUpdates.java  |  678 ++--
 .../org/apache/lucene/index/TestOmitNorms.java     |  114 +-
 .../org/apache/lucene/index/TestOmitPositions.java |   54 +-
 .../test/org/apache/lucene/index/TestOmitTf.java   |  400 ++-
 .../index/TestOneMergeWrappingMergePolicy.java     |   77 +-
 .../org/apache/lucene/index/TestOrdinalMap.java    |   61 +-
 .../lucene/index/TestParallelCompositeReader.java  |  234 +-
 .../lucene/index/TestParallelLeafReader.java       |  144 +-
 .../lucene/index/TestParallelReaderEmptyIndex.java |   52 +-
 .../apache/lucene/index/TestParallelTermEnum.java  |    8 +-
 .../test/org/apache/lucene/index/TestPayloads.java |  233 +-
 .../apache/lucene/index/TestPayloadsOnVectors.java |   29 +-
 .../apache/lucene/index/TestPendingDeletes.java    |   58 +-
 .../lucene/index/TestPendingSoftDeletes.java       |  220 +-
 .../apache/lucene/index/TestPerSegmentDeletes.java |   67 +-
 .../TestPersistentSnapshotDeletionPolicy.java      |   76 +-
 .../org/apache/lucene/index/TestPointValues.java   |  355 +-
 .../apache/lucene/index/TestPostingsOffsets.java   |  257 +-
 .../apache/lucene/index/TestPrefixCodedTerms.java  |   21 +-
 .../org/apache/lucene/index/TestReadOnlyIndex.java |   31 +-
 .../org/apache/lucene/index/TestReaderClosed.java  |   22 +-
 .../org/apache/lucene/index/TestReaderPool.java    |  131 +-
 .../lucene/index/TestReaderWrapperDVTypeCheck.java |   63 +-
 .../test/org/apache/lucene/index/TestRollback.java |   10 +-
 .../apache/lucene/index/TestRollingUpdates.java    |   53 +-
 .../lucene/index/TestSameTokenSamePosition.java    |   23 +-
 .../org/apache/lucene/index/TestSegmentInfos.java  |  254 +-
 .../org/apache/lucene/index/TestSegmentMerger.java |   73 +-
 .../org/apache/lucene/index/TestSegmentReader.java |  140 +-
 .../apache/lucene/index/TestSegmentTermDocs.java   |  146 +-
 .../apache/lucene/index/TestSegmentTermEnum.java   |   38 +-
 .../lucene/index/TestSegmentToThreadMapping.java   |   29 +-
 .../lucene/index/TestSizeBoundedForceMerge.java    |  129 +-
 .../lucene/index/TestSnapshotDeletionPolicy.java   |  200 +-
 .../TestSoftDeletesDirectoryReaderWrapper.java     |   72 +-
 .../index/TestSoftDeletesRetentionMergePolicy.java |  376 +-
 .../lucene/index/TestSortedSetDocValues.java       |    1 -
 .../lucene/index/TestSortingCodecReader.java       |   73 +-
 .../org/apache/lucene/index/TestStressAdvance.java |   20 +-
 .../org/apache/lucene/index/TestStressDeletes.java |   92 +-
 .../apache/lucene/index/TestStressIndexing.java    |   55 +-
 .../apache/lucene/index/TestStressIndexing2.java   |  421 ++-
 .../org/apache/lucene/index/TestStressNRT.java     |  574 +--
 .../org/apache/lucene/index/TestSumDocFreq.java    |   23 +-
 .../apache/lucene/index/TestSwappedIndexFiles.java |   37 +-
 .../src/test/org/apache/lucene/index/TestTerm.java |    1 -
 .../org/apache/lucene/index/TestTermStates.java    |    3 +-
 .../org/apache/lucene/index/TestTermVectors.java   |   50 +-
 .../apache/lucene/index/TestTermVectorsReader.java |  226 +-
 .../apache/lucene/index/TestTermVectorsWriter.java |  156 +-
 .../org/apache/lucene/index/TestTermdocPerf.java   |  101 +-
 .../test/org/apache/lucene/index/TestTerms.java    |   17 +-
 .../org/apache/lucene/index/TestTermsEnum.java     |  190 +-
 .../org/apache/lucene/index/TestTermsEnum2.java    |   62 +-
 .../apache/lucene/index/TestTermsHashPerField.java |  208 +-
 .../lucene/index/TestThreadedForceMerge.java       |  130 +-
 .../apache/lucene/index/TestTieredMergePolicy.java |  427 ++-
 .../index/TestTragicIndexWriterDeadlock.java       |  140 +-
 .../lucene/index/TestTransactionRollback.java      |  103 +-
 .../org/apache/lucene/index/TestTransactions.java  |   77 +-
 .../org/apache/lucene/index/TestTryDelete.java     |   45 +-
 .../lucene/index/TestTwoPhaseCommitTool.java       |    7 +-
 .../apache/lucene/index/TestUniqueTermCount.java   |   27 +-
 .../lucene/index/TestUpgradeIndexMergePolicy.java  |    5 +-
 .../org/apache/lucene/index/TestVectorValues.java  |  210 +-
 .../apache/lucene/search/BaseTestRangeFilter.java  |   70 +-
 .../lucene/search/FuzzyTermOnShortTermsTest.java   |  119 +-
 .../apache/lucene/search/JustCompileSearch.java    |   33 +-
 .../apache/lucene/search/TermInSetQueryTest.java   |   87 +-
 .../search/TestApproximationSearchEquivalence.java |   12 +-
 .../apache/lucene/search/TestAutomatonQuery.java   |  131 +-
 .../lucene/search/TestAutomatonQueryUnicode.java   |   19 +-
 .../apache/lucene/search/TestBlendedTermQuery.java |   68 +-
 .../lucene/search/TestBlockMaxConjunction.java     |   29 +-
 .../org/apache/lucene/search/TestBoolean2.java     |  125 +-
 .../lucene/search/TestBoolean2ScorerSupplier.java  |  144 +-
 .../lucene/search/TestBooleanMinShouldMatch.java   |  678 ++--
 .../org/apache/lucene/search/TestBooleanOr.java    |  107 +-
 .../org/apache/lucene/search/TestBooleanQuery.java |  327 +-
 .../search/TestBooleanQueryVisitSubscorers.java    |  100 +-
 .../apache/lucene/search/TestBooleanRewrites.java  |  495 +--
 .../apache/lucene/search/TestBooleanScorer.java    |  172 +-
 .../org/apache/lucene/search/TestBoostQuery.java   |   34 +-
 .../apache/lucene/search/TestCachingCollector.java |   83 +-
 .../lucene/search/TestComplexExplanations.java     |  238 +-
 .../TestComplexExplanationsOfNonMatches.java       |   11 +-
 .../apache/lucene/search/TestConjunctionDISI.java  |  139 +-
 .../org/apache/lucene/search/TestConjunctions.java |   74 +-
 .../lucene/search/TestConstantScoreQuery.java      |  141 +-
 .../lucene/search/TestConstantScoreScorer.java     |   69 +-
 .../search/TestControlledRealTimeReopenThread.java |  263 +-
 .../lucene/search/TestCustomSearcherSort.java      |  105 +-
 .../org/apache/lucene/search/TestDateSort.java     |   11 +-
 .../lucene/search/TestDisjunctionMaxQuery.java     |  436 ++-
 ...estDisjunctionScoreBlockBoundaryPropagator.java |    5 +-
 .../apache/lucene/search/TestDocIdSetIterator.java |   22 +-
 .../apache/lucene/search/TestDocValuesQueries.java |  117 +-
 .../lucene/search/TestDocValuesRewriteMethod.java  |   48 +-
 .../lucene/search/TestDoubleRangeFieldQueries.java |   86 +-
 .../lucene/search/TestDoubleValuesSource.java      |  195 +-
 .../apache/lucene/search/TestEarlyTermination.java |   56 +-
 .../lucene/search/TestElevationComparator.java     |  221 +-
 .../lucene/search/TestFieldCacheRewriteMethod.java |   18 +-
 .../search/TestFieldSortOptimizationSkipping.java  |  137 +-
 .../apache/lucene/search/TestFieldValueQuery.java  |   21 +-
 .../org/apache/lucene/search/TestFilterWeight.java |   30 +-
 .../lucene/search/TestFloatRangeFieldQueries.java  |   89 +-
 .../org/apache/lucene/search/TestFuzzyQuery.java   |  340 +-
 .../lucene/search/TestIndexOrDocValuesQuery.java   |   89 +-
 .../apache/lucene/search/TestIndexSearcher.java    |  228 +-
 .../lucene/search/TestInetAddressRangeQueries.java |   43 +-
 .../lucene/search/TestIntRangeFieldQueries.java    |   59 +-
 .../apache/lucene/search/TestLRUQueryCache.java    |  781 ++--
 .../lucene/search/TestLatLonDocValuesQueries.java  |    6 +-
 .../lucene/search/TestLatLonPointQueries.java      |    7 +-
 .../apache/lucene/search/TestLiveFieldValues.java  |  167 +-
 .../lucene/search/TestLongRangeFieldQueries.java   |   63 +-
 .../apache/lucene/search/TestLongValuesSource.java |   28 +-
 .../lucene/search/TestMatchAllDocsQuery.java       |   36 +-
 .../apache/lucene/search/TestMatchNoDocsQuery.java |   16 +-
 .../apache/lucene/search/TestMatchesIterator.java  |  709 ++--
 .../apache/lucene/search/TestMaxClauseLimit.java   |   72 +-
 .../lucene/search/TestMaxScoreAccumulator.java     |    3 +-
 .../lucene/search/TestMaxScoreSumPropagator.java   |   31 +-
 .../apache/lucene/search/TestMinShouldMatch2.java  |  177 +-
 .../apache/lucene/search/TestMultiCollector.java   |  218 +-
 .../apache/lucene/search/TestMultiPhraseEnum.java  |   56 +-
 .../apache/lucene/search/TestMultiPhraseQuery.java |  288 +-
 .../apache/lucene/search/TestMultiSliceMerge.java  |   25 +-
 .../lucene/search/TestMultiTermConstantScore.java  |  168 +-
 .../lucene/search/TestMultiTermQueryRewrites.java  |  166 +-
 .../lucene/search/TestMultiThreadTermVectors.java  |   25 +-
 .../org/apache/lucene/search/TestMultiset.java     |    3 -
 .../apache/lucene/search/TestNGramPhraseQuery.java |   27 +-
 .../org/apache/lucene/search/TestNeedsScores.java  |   36 +-
 .../lucene/search/TestNormsFieldExistsQuery.java   |   15 +-
 .../src/test/org/apache/lucene/search/TestNot.java |   12 +-
 .../lucene/search/TestPhrasePrefixQuery.java       |   26 +-
 .../org/apache/lucene/search/TestPhraseQuery.java  |  557 ++-
 .../org/apache/lucene/search/TestPointQueries.java |  980 +++--
 .../lucene/search/TestPositionIncrement.java       |  133 +-
 .../search/TestPositiveScoresOnlyCollector.java    |   53 +-
 .../lucene/search/TestPrefixInBooleanQuery.java    |   54 +-
 .../org/apache/lucene/search/TestPrefixQuery.java  |   21 +-
 .../org/apache/lucene/search/TestPrefixRandom.java |   48 +-
 .../apache/lucene/search/TestQueryRescorer.java    |  162 +-
 .../org/apache/lucene/search/TestQueryVisitor.java |  228 +-
 .../search/TestRangeFieldsDocValuesQuery.java      |    8 +-
 .../org/apache/lucene/search/TestRegexpQuery.java  |  118 +-
 .../org/apache/lucene/search/TestRegexpRandom.java |   46 +-
 .../apache/lucene/search/TestRegexpRandom2.java    |   51 +-
 .../lucene/search/TestReqExclBulkScorer.java       |   88 +-
 .../apache/lucene/search/TestReqOptSumScorer.java  |  162 +-
 .../lucene/search/TestSameScoresWithThreads.java   |   72 +-
 .../search/TestScoreCachingWrappingScorer.java     |   77 +-
 .../org/apache/lucene/search/TestScorerPerf.java   |  240 +-
 .../org/apache/lucene/search/TestSearchAfter.java  |   97 +-
 .../lucene/search/TestSearchWithThreads.java       |   50 +-
 .../apache/lucene/search/TestSearcherManager.java  |  554 +--
 .../lucene/search/TestSegmentCacheables.java       |    7 +-
 .../apache/lucene/search/TestShardSearching.java   |  101 +-
 .../org/apache/lucene/search/TestSimilarity.java   |  208 +-
 .../lucene/search/TestSimilarityProvider.java      |    8 +-
 .../lucene/search/TestSimpleExplanations.java      |  355 +-
 .../search/TestSimpleExplanationsOfNonMatches.java |   12 +-
 .../TestSimpleExplanationsWithFillerDocs.java      |   60 +-
 .../lucene/search/TestSimpleSearchEquivalence.java |   55 +-
 .../lucene/search/TestSloppyPhraseQuery.java       |  242 +-
 .../lucene/search/TestSloppyPhraseQuery2.java      |   26 +-
 .../test/org/apache/lucene/search/TestSort.java    |  124 +-
 .../org/apache/lucene/search/TestSortRandom.java   |  114 +-
 .../org/apache/lucene/search/TestSortRescorer.java |   91 +-
 .../lucene/search/TestSortedNumericSortField.java  |   53 +-
 .../lucene/search/TestSortedSetSelector.java       |   93 +-
 .../lucene/search/TestSortedSetSortField.java      |   37 +-
 .../apache/lucene/search/TestSubScorerFreqs.java   |   44 +-
 .../org/apache/lucene/search/TestSynonymQuery.java |  250 +-
 .../org/apache/lucene/search/TestTermQuery.java    |  107 +-
 .../apache/lucene/search/TestTermRangeQuery.java   |   84 +-
 .../org/apache/lucene/search/TestTermScorer.java   |  193 +-
 .../lucene/search/TestTimeLimitingCollector.java   |  267 +-
 .../apache/lucene/search/TestTopDocsCollector.java |  235 +-
 .../org/apache/lucene/search/TestTopDocsMerge.java |  117 +-
 .../lucene/search/TestTopFieldCollector.java       |  246 +-
 .../TestTopFieldCollectorEarlyTermination.java     |  157 +-
 .../lucene/search/TestTotalHitCountCollector.java  |    9 +-
 .../TestUsageTrackingFilterCachingPolicy.java      |   37 +-
 .../org/apache/lucene/search/TestWANDScorer.java   |  174 +-
 .../org/apache/lucene/search/TestWildcard.java     |  279 +-
 .../apache/lucene/search/TestWildcardRandom.java   |   49 +-
 .../lucene/search/TestXYPointDistanceSort.java     |   67 +-
 .../apache/lucene/search/TestXYPointQueries.java   |    2 +-
 .../search/similarities/AxiomaticTestCase.java     |    6 +-
 .../search/similarities/BasicModelTestCase.java    |   11 +-
 .../search/similarities/DistributionTestCase.java  |    5 +-
 .../search/similarities/TestAxiomaticF1EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF1LOG.java    |    1 -
 .../search/similarities/TestAxiomaticF2EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF2LOG.java    |    1 -
 .../search/similarities/TestAxiomaticF3EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF3LOG.java    |    1 -
 .../similarities/TestAxiomaticSimilarity.java      |   81 +-
 .../search/similarities/TestBM25Similarity.java    |   80 +-
 .../search/similarities/TestBasicModelG.java       |    1 -
 .../search/similarities/TestBasicModelIF.java      |    1 -
 .../search/similarities/TestBasicModelIn.java      |    1 -
 .../search/similarities/TestBasicModelIne.java     |    1 -
 .../search/similarities/TestBooleanSimilarity.java |   26 +-
 .../search/similarities/TestClassicSimilarity.java |   77 +-
 .../search/similarities/TestDistributionLL.java    |    1 -
 .../search/similarities/TestDistributionSPL.java   |    1 -
 .../similarities/TestIndependenceChiSquared.java   |    1 -
 .../similarities/TestIndependenceSaturated.java    |    1 -
 .../similarities/TestIndependenceStandardized.java |    1 -
 .../similarities/TestLMDirichletSimilarity.java    |    1 -
 .../TestLMJelinekMercerSimilarity.java             |    1 -
 .../search/similarities/TestSimilarity2.java       |   62 +-
 .../search/similarities/TestSimilarityBase.java    |  397 +--
 .../search/spans/JustCompileSearchSpans.java       |   18 +-
 .../org/apache/lucene/search/spans/TestBasics.java |  377 +-
 .../search/spans/TestFieldMaskingSpanQuery.java    |  348 +-
 .../lucene/search/spans/TestFilterSpans.java       |    2 -
 .../lucene/search/spans/TestNearSpansOrdered.java  |  275 +-
 .../lucene/search/spans/TestSpanBoostQuery.java    |   14 +-
 .../lucene/search/spans/TestSpanCollection.java    |   51 +-
 .../lucene/search/spans/TestSpanContainQuery.java  |   34 +-
 .../lucene/search/spans/TestSpanExplanations.java  |  154 +-
 .../spans/TestSpanExplanationsOfNonMatches.java    |   13 +-
 .../lucene/search/spans/TestSpanFirstQuery.java    |   15 +-
 .../spans/TestSpanMultiTermQueryWrapper.java       |  133 +-
 .../lucene/search/spans/TestSpanNearQuery.java     |   54 +-
 .../lucene/search/spans/TestSpanNotQuery.java      |   32 +-
 .../lucene/search/spans/TestSpanOrQuery.java       |   18 +-
 .../search/spans/TestSpanSearchEquivalence.java    |  313 +-
 .../lucene/search/spans/TestSpanTermQuery.java     |   19 +-
 .../org/apache/lucene/search/spans/TestSpans.java  |  248 +-
 .../apache/lucene/search/spans/TestSpansEnum.java  |   56 +-
 .../lucene/store/BaseDataOutputTestCase.java       |  230 +-
 .../apache/lucene/store/TestBufferedChecksum.java  |    6 +-
 .../lucene/store/TestBufferedIndexInput.java       |  170 +-
 .../lucene/store/TestByteArrayDataInput.java       |    1 -
 .../lucene/store/TestByteBuffersDataInput.java     |  149 +-
 .../lucene/store/TestByteBuffersDataOutput.java    |   61 +-
 .../lucene/store/TestByteBuffersDirectory.java     |   74 +-
 .../org/apache/lucene/store/TestDirectory.java     |   56 +-
 .../lucene/store/TestFileSwitchDirectory.java      |   76 +-
 .../apache/lucene/store/TestFilterDirectory.java   |    8 +-
 .../org/apache/lucene/store/TestLockFactory.java   |  126 +-
 .../org/apache/lucene/store/TestMmapDirectory.java |   44 +-
 .../org/apache/lucene/store/TestMultiMMap.java     |  204 +-
 .../apache/lucene/store/TestNIOFSDirectory.java    |   29 +-
 .../lucene/store/TestNRTCachingDirectory.java      |   53 +-
 .../lucene/store/TestNativeFSLockFactory.java      |   57 +-
 .../org/apache/lucene/store/TestRateLimiter.java   |   83 +-
 .../lucene/store/TestSimpleFSLockFactory.java      |   16 +-
 .../store/TestSingleInstanceLockFactory.java       |   19 +-
 .../lucene/store/TestSleepingLockWrapper.java      |    9 +-
 .../lucene/store/TestStressLockFactories.java      |   74 +-
 .../lucene/store/TestTrackingDirectoryWrapper.java |    9 +-
 .../org/apache/lucene/util/BaseSortTestCase.java   |   39 +-
 .../lucene/util/StressRamUsageEstimator.java       |   15 +-
 .../org/apache/lucene/util/Test2BPagedBytes.java   |    8 +-
 .../test/org/apache/lucene/util/TestArrayUtil.java |  249 +-
 .../apache/lucene/util/TestAttributeSource.java    |  165 +-
 .../org/apache/lucene/util/TestByteBlockPool.java  |    4 +-
 .../test/org/apache/lucene/util/TestBytesRef.java  |   23 +-
 .../org/apache/lucene/util/TestBytesRefArray.java  |   27 +-
 .../org/apache/lucene/util/TestBytesRefHash.java   |  121 +-
 .../test/org/apache/lucene/util/TestCharsRef.java  |  112 +-
 .../apache/lucene/util/TestCharsRefBuilder.java    |    2 -
 .../apache/lucene/util/TestClassLoaderUtils.java   |    4 +-
 .../lucene/util/TestCloseableThreadLocal.java      |    6 +-
 .../org/apache/lucene/util/TestCollectionUtil.java |   23 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java    |   17 +-
 .../org/apache/lucene/util/TestFilterIterator.java |  135 +-
 .../apache/lucene/util/TestFixedBitDocIdSet.java   |    1 -
 .../org/apache/lucene/util/TestFixedBitSet.java    |  318 +-
 .../lucene/util/TestFixedLengthBytesRefArray.java  |   41 +-
 .../util/TestFrequencyTrackingRingBuffer.java      |    5 +-
 .../test/org/apache/lucene/util/TestIOUtils.java   |   45 +-
 .../apache/lucene/util/TestInPlaceMergeSorter.java |    6 +-
 .../apache/lucene/util/TestIntArrayDocIdSet.java   |    3 -
 .../org/apache/lucene/util/TestIntroSelector.java  |   32 +-
 .../org/apache/lucene/util/TestIntroSorter.java    |    1 -
 .../test/org/apache/lucene/util/TestIntsRef.java   |   23 +-
 .../org/apache/lucene/util/TestLSBRadixSorter.java |    2 -
 .../org/apache/lucene/util/TestLongBitSet.java     |  199 +-
 .../test/org/apache/lucene/util/TestLongHeap.java  |   19 +-
 .../test/org/apache/lucene/util/TestLongsRef.java  |   22 +-
 .../org/apache/lucene/util/TestMSBRadixSorter.java |   27 +-
 .../test/org/apache/lucene/util/TestMathUtil.java  |   68 +-
 .../org/apache/lucene/util/TestMergedIterator.java |   13 +-
 .../org/apache/lucene/util/TestNamedSPILoader.java |   16 +-
 .../org/apache/lucene/util/TestNotDocIdSet.java    |    6 +-
 .../org/apache/lucene/util/TestNumericUtils.java   |  426 ++-
 .../org/apache/lucene/util/TestOfflineSorter.java  |  544 +--
 .../org/apache/lucene/util/TestPagedBytes.java     |   48 +-
 .../org/apache/lucene/util/TestPriorityQueue.java  |   52 +-
 .../org/apache/lucene/util/TestQueryBuilder.java   |  471 +--
 .../org/apache/lucene/util/TestRadixSelector.java  |   42 +-
 .../apache/lucene/util/TestRamUsageEstimator.java  |  118 +-
 .../util/TestRecyclingByteBlockAllocator.java      |   25 +-
 .../util/TestRecyclingIntBlockAllocator.java       |   27 +-
 .../apache/lucene/util/TestRoaringDocIdSet.java    |    5 +-
 .../org/apache/lucene/util/TestRollingBuffer.java  |   24 +-
 .../org/apache/lucene/util/TestSentinelIntSet.java |   31 +-
 .../test/org/apache/lucene/util/TestSetOnce.java   |   25 +-
 .../org/apache/lucene/util/TestSloppyMath.java     |  107 +-
 .../org/apache/lucene/util/TestSmallFloat.java     |   97 +-
 .../lucene/util/TestSparseFixedBitDocIdSet.java    |    2 -
 .../apache/lucene/util/TestSparseFixedBitSet.java  |    6 +-
 .../org/apache/lucene/util/TestStringHelper.java   |   41 +-
 .../lucene/util/TestStringMSBRadixSorter.java      |    4 +-
 .../test/org/apache/lucene/util/TestTimSorter.java |    3 +-
 .../apache/lucene/util/TestTimSorterWorstCase.java |   63 +-
 .../org/apache/lucene/util/TestUnicodeUtil.java    |   79 +-
 .../org/apache/lucene/util/TestVectorUtil.java     |   10 +-
 .../test/org/apache/lucene/util/TestVersion.java   |  176 +-
 .../org/apache/lucene/util/TestVirtualMethod.java  |   69 +-
 .../apache/lucene/util/TestWeakIdentityMap.java    |  197 +-
 .../util/automaton/FiniteStringsIteratorTest.java  |   53 +-
 .../LimitedFiniteStringsIteratorTest.java          |   32 +-
 .../lucene/util/automaton/TestAutomaton.java       |  982 +++---
 .../util/automaton/TestCompiledAutomaton.java      |   32 +-
 .../automaton/TestDaciukMihovAutomatonBuilder.java |   12 +-
 .../lucene/util/automaton/TestDeterminism.java     |   60 +-
 .../util/automaton/TestDeterminizeLexicon.java     |   10 +-
 .../apache/lucene/util/automaton/TestIntSet.java   |  124 +-
 .../util/automaton/TestLevenshteinAutomata.java    |  309 +-
 .../apache/lucene/util/automaton/TestMinimize.java |   29 +-
 .../lucene/util/automaton/TestOperations.java      |   81 +-
 .../apache/lucene/util/automaton/TestRegExp.java   |  136 +-
 .../lucene/util/automaton/TestUTF32ToUTF8.java     |   86 +-
 .../apache/lucene/util/bkd/Test2BBKDPoints.java    |   42 +-
 .../test/org/apache/lucene/util/bkd/TestBKD.java   | 1485 ++++----
 .../lucene/util/bkd/TestBKDRadixSelector.java      |  225 +-
 .../apache/lucene/util/bkd/TestBKDRadixSort.java   |   50 +-
 .../apache/lucene/util/bkd/TestDocIdsWriter.java   |   37 +-
 .../util/bkd/TestMutablePointsReaderUtils.java     |  116 +-
 .../apache/lucene/util/compress/LZ4TestCase.java   |  211 +-
 .../apache/lucene/util/compress/TestFastLZ4.java   |    1 -
 .../apache/lucene/util/compress/TestHighLZ4.java   |    3 +-
 .../compress/TestLowercaseAsciiCompression.java    |    7 +-
 .../test/org/apache/lucene/util/fst/Test2BFST.java |   88 +-
 .../apache/lucene/util/fst/TestBitTableUtil.java   |   79 +-
 .../org/apache/lucene/util/fst/TestBytesStore.java |  312 +-
 .../lucene/util/fst/TestFSTDirectAddressing.java   |  118 +-
 .../test/org/apache/lucene/util/fst/TestFSTs.java  |  659 ++--
 .../test/org/apache/lucene/util/fst/TestUtil.java  |   20 +-
 .../graph/TestGraphTokenStreamFiniteStrings.java   |  427 +--
 .../apache/lucene/util/hnsw/KnnGraphTester.java    |  188 +-
 .../apache/lucene/util/hnsw/MockVectorValues.java  |    6 +-
 .../test/org/apache/lucene/util/hnsw/TestHnsw.java |   88 +-
 .../org/apache/lucene/util/hnsw/TestNeighbors.java |    4 +-
 .../lucene/util/mutable/TestMutableValues.java     |   64 +-
 .../lucene/util/packed/TestDirectMonotonic.java    |   98 +-
 .../lucene/util/packed/TestDirectPacked.java       |   34 +-
 .../apache/lucene/util/packed/TestPackedInts.java  |  492 ++-
 .../lucene/index/BaseDocValuesFormatTestCase.java  |    3 +-
 1537 files changed, 92290 insertions(+), 74918 deletions(-)
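
A note on what "code reformatting" means here: the change list includes
gradle/validation/spotless.gradle, which suggests the reformat is applied through the
Spotless Gradle plugin using google-java-format conventions (2-space indents, wrapping at
the column limit). That is an inference from the file list, not something stated in the
commit message itself. The sketch below is a hypothetical illustration of the kind of
whitespace-only change such a pass produces; it is not code from this commit, and
FormatExample is an invented name.

    // Hypothetical before/after of an automated google-java-format pass.
    // Only indentation and line wrapping change; behavior is identical, which
    // is why a reformat-only revision can be applied mechanically across
    // hundreds of files.
    //
    // Before (4-space indents, body crammed onto one line):
    //   public static int clampedSum(int[] values, int min, int max) { int s = 0; ... }
    //
    // After (2-space indents, one statement per line):
    public class FormatExample {

      static int clampedSum(int[] values, int min, int max) {
        int sum = 0;
        for (int value : values) {
          sum += Math.max(min, Math.min(max, value));
        }
        return sum;
      }

      public static void main(String[] args) {
        // 1 stays 1, 50 stays 50, 999 clamps to 100 -> prints 151.
        System.out.println(clampedSum(new int[] {1, 50, 999}, 0, 100));
      }
    }

Because a pass like this changes only layout, reviewing it amounts to confirming the
diff is whitespace-only; the large insertion/deletion counts above are expected for a
formatter sweep rather than a functional change.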


[lucene-solr] 01/02: LUCENE-9570: code reformatting [partial].


dweiss pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 2d6ad2fee6dfd96388594f4de9b37c037efe8017
Author: Dawid Weiss <da...@carrotsearch.com>
AuthorDate: Wed Dec 23 12:41:23 2020 +0100

    LUCENE-9570: code reformatting [partial].
---
 dev-tools/scripts/checkJavadocLinks.py             |    4 +-
 gradle/generation/util.gradle                      |   17 +
 gradle/validation/spotless.gradle                  |   55 +-
 .../lucene50/Lucene50SkipWriter.java               |    3 +-
 .../src/java/org/apache/lucene/LucenePackage.java  |    5 +-
 .../lucene/analysis/AbstractAnalysisFactory.java   |  197 +-
 .../apache/lucene/analysis/AnalysisSPILoader.java  |  161 +-
 .../java/org/apache/lucene/analysis/Analyzer.java  |  417 ++-
 .../apache/lucene/analysis/AnalyzerWrapper.java    |  112 +-
 .../apache/lucene/analysis/CachingTokenFilter.java |   37 +-
 .../org/apache/lucene/analysis/CharArrayMap.java   |  359 +-
 .../org/apache/lucene/analysis/CharArraySet.java   |  136 +-
 .../org/apache/lucene/analysis/CharFilter.java     |   56 +-
 .../apache/lucene/analysis/CharFilterFactory.java  |   43 +-
 .../org/apache/lucene/analysis/CharacterUtils.java |  145 +-
 .../lucene/analysis/DelegatingAnalyzerWrapper.java |   56 +-
 .../lucene/analysis/FilteringTokenFilter.java      |   20 +-
 .../apache/lucene/analysis/GraphTokenFilter.java   |   36 +-
 .../apache/lucene/analysis/LowerCaseFilter.java    |   15 +-
 .../lucene/analysis/ReusableStringReader.java      |   18 +-
 .../org/apache/lucene/analysis/StopFilter.java     |   65 +-
 .../lucene/analysis/StopwordAnalyzerBase.java      |   97 +-
 .../org/apache/lucene/analysis/TokenFilter.java    |   40 +-
 .../apache/lucene/analysis/TokenFilterFactory.java |   44 +-
 .../org/apache/lucene/analysis/TokenStream.java    |  222 +-
 .../lucene/analysis/TokenStreamToAutomaton.java    |   58 +-
 .../java/org/apache/lucene/analysis/Tokenizer.java |   78 +-
 .../apache/lucene/analysis/TokenizerFactory.java   |   46 +-
 .../org/apache/lucene/analysis/WordlistLoader.java |  117 +-
 .../org/apache/lucene/analysis/package-info.java   | 1139 +++---
 .../org/apache/lucene/codecs/BlockTermState.java   |   20 +-
 .../src/java/org/apache/lucene/codecs/Codec.java   |  104 +-
 .../java/org/apache/lucene/codecs/CodecUtil.java   |  504 +--
 .../codecs/CompetitiveImpactAccumulator.java       |   41 +-
 .../apache/lucene/codecs/CompoundDirectory.java    |   35 +-
 .../org/apache/lucene/codecs/CompoundFormat.java   |   18 +-
 .../apache/lucene/codecs/DocValuesConsumer.java    | 1035 +++---
 .../org/apache/lucene/codecs/DocValuesFormat.java  |  106 +-
 .../apache/lucene/codecs/DocValuesProducer.java    |   74 +-
 .../org/apache/lucene/codecs/FieldInfosFormat.java |   26 +-
 .../org/apache/lucene/codecs/FieldsConsumer.java   |   78 +-
 .../org/apache/lucene/codecs/FieldsProducer.java   |   37 +-
 .../java/org/apache/lucene/codecs/FilterCodec.java |   28 +-
 .../org/apache/lucene/codecs/LiveDocsFormat.java   |   31 +-
 .../lucene/codecs/MultiLevelSkipListReader.java    |  123 +-
 .../lucene/codecs/MultiLevelSkipListWriter.java    |   78 +-
 .../apache/lucene/codecs/MutablePointValues.java   |   10 +-
 .../org/apache/lucene/codecs/NormsConsumer.java    |  228 +-
 .../java/org/apache/lucene/codecs/NormsFormat.java |   30 +-
 .../org/apache/lucene/codecs/NormsProducer.java    |   41 +-
 .../org/apache/lucene/codecs/PointsFormat.java     |   80 +-
 .../org/apache/lucene/codecs/PointsReader.java     |   25 +-
 .../org/apache/lucene/codecs/PointsWriter.java     |   89 +-
 .../org/apache/lucene/codecs/PostingsFormat.java   |  105 +-
 .../apache/lucene/codecs/PostingsReaderBase.java   |   63 +-
 .../apache/lucene/codecs/PostingsWriterBase.java   |   61 +-
 .../lucene/codecs/PushPostingsWriterBase.java      |   78 +-
 .../apache/lucene/codecs/SegmentInfoFormat.java    |   25 +-
 .../apache/lucene/codecs/StoredFieldsFormat.java   |   24 +-
 .../apache/lucene/codecs/StoredFieldsReader.java   |   38 +-
 .../apache/lucene/codecs/StoredFieldsWriter.java   |   99 +-
 .../java/org/apache/lucene/codecs/TermStats.java   |   11 +-
 .../apache/lucene/codecs/TermVectorsFormat.java    |   25 +-
 .../apache/lucene/codecs/TermVectorsReader.java    |   48 +-
 .../apache/lucene/codecs/TermVectorsWriter.java    |  184 +-
 .../org/apache/lucene/codecs/VectorFormat.java     |   57 +-
 .../org/apache/lucene/codecs/VectorReader.java     |   22 +-
 .../org/apache/lucene/codecs/VectorWriter.java     |   49 +-
 .../codecs/blocktree/BlockTreeTermsReader.java     |  175 +-
 .../codecs/blocktree/BlockTreeTermsWriter.java     |  559 +--
 .../codecs/blocktree/CompressionAlgorithm.java     |   15 +-
 .../lucene/codecs/blocktree/FieldReader.java       |   82 +-
 .../codecs/blocktree/IntersectTermsEnum.java       |  104 +-
 .../codecs/blocktree/IntersectTermsEnumFrame.java  |   21 +-
 .../lucene/codecs/blocktree/SegmentTermsEnum.java  |  462 ++-
 .../codecs/blocktree/SegmentTermsEnumFrame.java    |  180 +-
 .../org/apache/lucene/codecs/blocktree/Stats.java  |  137 +-
 .../lucene/codecs/blocktree/package-info.java      |   20 +-
 .../compressing/CompressingStoredFieldsFormat.java |  141 +-
 .../compressing/CompressingStoredFieldsReader.java |  267 +-
 .../compressing/CompressingStoredFieldsWriter.java |  323 +-
 .../compressing/CompressingTermVectorsFormat.java  |   79 +-
 .../compressing/CompressingTermVectorsReader.java  |  375 +-
 .../compressing/CompressingTermVectorsWriter.java  |  249 +-
 .../lucene/codecs/compressing/CompressionMode.java |  209 +-
 .../lucene/codecs/compressing/Compressor.java      |   13 +-
 .../lucene/codecs/compressing/Decompressor.java    |   21 +-
 .../lucene/codecs/compressing/FieldsIndex.java     |    2 -
 .../codecs/compressing/FieldsIndexReader.java      |   46 +-
 .../codecs/compressing/FieldsIndexWriter.java      |   45 +-
 .../compressing/LegacyFieldsIndexReader.java       |   38 +-
 .../lucene/codecs/compressing/MatchingReaders.java |   23 +-
 .../lucene/codecs/compressing/package-info.java    |    6 +-
 .../codecs/lucene50/Lucene50CompoundFormat.java    |   76 +-
 .../codecs/lucene50/Lucene50CompoundReader.java    |  101 +-
 .../codecs/lucene50/Lucene50LiveDocsFormat.java    |   81 +-
 .../codecs/lucene50/Lucene50TermVectorsFormat.java |  208 +-
 .../lucene/codecs/lucene50/package-info.java       |    5 +-
 .../codecs/lucene60/Lucene60FieldInfosFormat.java  |  304 +-
 .../lucene/codecs/lucene60/package-info.java       |    4 +-
 .../apache/lucene/codecs/lucene80/IndexedDISI.java |  369 +-
 .../codecs/lucene80/Lucene80DocValuesConsumer.java |  319 +-
 .../codecs/lucene80/Lucene80DocValuesFormat.java   |  177 +-
 .../codecs/lucene80/Lucene80DocValuesProducer.java |  327 +-
 .../codecs/lucene80/Lucene80NormsConsumer.java     |   37 +-
 .../codecs/lucene80/Lucene80NormsFormat.java       |   95 +-
 .../codecs/lucene80/Lucene80NormsProducer.java     |  103 +-
 .../lucene/codecs/lucene80/package-info.java       |    5 +-
 .../lucene/codecs/lucene84/ForDeltaUtil.java       |   21 +-
 .../org/apache/lucene/codecs/lucene84/ForUtil.java | 1197 +++----
 .../codecs/lucene84/Lucene84PostingsFormat.java    |  635 ++--
 .../codecs/lucene84/Lucene84PostingsReader.java    |  486 ++-
 .../codecs/lucene84/Lucene84PostingsWriter.java    |  128 +-
 .../codecs/lucene84/Lucene84ScoreSkipReader.java   |   59 +-
 .../lucene/codecs/lucene84/Lucene84SkipReader.java |   66 +-
 .../lucene/codecs/lucene84/Lucene84SkipWriter.java |   69 +-
 .../apache/lucene/codecs/lucene84/PForUtil.java    |   35 +-
 .../lucene/codecs/lucene84/package-info.java       |    4 +-
 .../codecs/lucene86/Lucene86PointsFormat.java      |   29 +-
 .../codecs/lucene86/Lucene86PointsReader.java      |   49 +-
 .../codecs/lucene86/Lucene86PointsWriter.java      |  156 +-
 .../codecs/lucene86/Lucene86SegmentInfoFormat.java |  107 +-
 .../lucene/codecs/lucene86/package-info.java       |    4 +-
 .../codecs/lucene87/BugfixDeflater_JDK8252739.java |  216 +-
 .../DeflateWithPresetDictCompressionMode.java      |   32 +-
 .../lucene87/LZ4WithPresetDictCompressionMode.java |   15 +-
 .../lucene87/Lucene87StoredFieldsFormat.java       |  146 +-
 .../lucene/codecs/lucene87/package-info.java       |    4 +-
 .../lucene/codecs/lucene90/Lucene90Codec.java      |   83 +-
 .../codecs/lucene90/Lucene90FieldInfosFormat.java  |  329 +-
 .../codecs/lucene90/Lucene90VectorFormat.java      |    5 +-
 .../codecs/lucene90/Lucene90VectorReader.java      |  126 +-
 .../codecs/lucene90/Lucene90VectorWriter.java      |   81 +-
 .../lucene/codecs/lucene90/package-info.java       |  526 ++-
 .../org/apache/lucene/codecs/package-info.java     |   74 +-
 .../codecs/perfield/PerFieldDocValuesFormat.java   |  124 +-
 .../lucene/codecs/perfield/PerFieldMergeState.java |   46 +-
 .../codecs/perfield/PerFieldPostingsFormat.java    |  155 +-
 .../lucene/codecs/perfield/package-info.java       |    4 +-
 .../lucene/document/BinaryDocValuesField.java      |   33 +-
 .../org/apache/lucene/document/BinaryPoint.java    |  167 +-
 .../lucene/document/BinaryRangeDocValues.java      |    7 +-
 .../lucene/document/BinaryRangeDocValuesField.java |    5 +-
 .../document/BinaryRangeFieldRangeQuery.java       |   41 +-
 .../java/org/apache/lucene/document/DateTools.java |  166 +-
 .../java/org/apache/lucene/document/Document.java  |  172 +-
 .../document/DocumentStoredFieldVisitor.java       |   39 +-
 .../lucene/document/DoubleDocValuesField.java      |   25 +-
 .../org/apache/lucene/document/DoublePoint.java    |  182 +-
 .../org/apache/lucene/document/DoubleRange.java    |  121 +-
 .../lucene/document/DoubleRangeDocValuesField.java |   23 +-
 .../lucene/document/DoubleRangeSlowRangeQuery.java |   10 +-
 .../lucene/document/FeatureDoubleValuesSource.java |   25 +-
 .../org/apache/lucene/document/FeatureField.java   |  353 +-
 .../org/apache/lucene/document/FeatureQuery.java   |   20 +-
 .../apache/lucene/document/FeatureSortField.java   |   24 +-
 .../src/java/org/apache/lucene/document/Field.java |  308 +-
 .../java/org/apache/lucene/document/FieldType.java |  229 +-
 .../lucene/document/FloatDocValuesField.java       |   26 +-
 .../org/apache/lucene/document/FloatPoint.java     |  176 +-
 .../org/apache/lucene/document/FloatRange.java     |  121 +-
 .../lucene/document/FloatRangeDocValuesField.java  |   23 +-
 .../lucene/document/FloatRangeSlowRangeQuery.java  |    7 +-
 .../apache/lucene/document/InetAddressPoint.java   |  172 +-
 .../apache/lucene/document/InetAddressRange.java   |   61 +-
 .../java/org/apache/lucene/document/IntPoint.java  |  159 +-
 .../java/org/apache/lucene/document/IntRange.java  |  121 +-
 .../lucene/document/IntRangeDocValuesField.java    |   26 +-
 .../lucene/document/IntRangeSlowRangeQuery.java    |    6 +-
 .../lucene/document/LatLonDocValuesBoxQuery.java   |   88 +-
 .../lucene/document/LatLonDocValuesField.java      |  167 +-
 .../LatLonDocValuesPointInGeometryQuery.java       |   60 +-
 .../org/apache/lucene/document/LatLonPoint.java    |  180 +-
 .../document/LatLonPointDistanceComparator.java    |   72 +-
 .../document/LatLonPointDistanceFeatureQuery.java  |  321 +-
 .../lucene/document/LatLonPointDistanceQuery.java  |  119 +-
 .../document/LatLonPointInGeometryQuery.java       |  158 +-
 .../lucene/document/LatLonPointSortField.java      |   15 +-
 .../org/apache/lucene/document/LatLonShape.java    |  140 +-
 .../document/LatLonShapeBoundingBoxQuery.java      |  459 ++-
 .../apache/lucene/document/LatLonShapeQuery.java   |  200 +-
 .../lucene/document/LongDistanceFeatureQuery.java  |  163 +-
 .../java/org/apache/lucene/document/LongPoint.java |  186 +-
 .../java/org/apache/lucene/document/LongRange.java |  117 +-
 .../lucene/document/LongRangeDocValuesField.java   |   23 +-
 .../lucene/document/LongRangeSlowRangeQuery.java   |    7 +-
 .../lucene/document/NumericDocValuesField.java     |   66 +-
 .../apache/lucene/document/RangeFieldQuery.java    |  319 +-
 .../org/apache/lucene/document/ShapeField.java     |  147 +-
 .../org/apache/lucene/document/ShapeQuery.java     |  310 +-
 .../lucene/document/SortedDocValuesField.java      |   68 +-
 .../document/SortedNumericDocValuesField.java      |   79 +-
 .../document/SortedNumericDocValuesRangeQuery.java |   68 +-
 .../lucene/document/SortedSetDocValuesField.java   |   72 +-
 .../document/SortedSetDocValuesRangeQuery.java     |   80 +-
 .../org/apache/lucene/document/StoredField.java    |   88 +-
 .../org/apache/lucene/document/StringField.java    |   46 +-
 .../java/org/apache/lucene/document/TextField.java |   24 +-
 .../org/apache/lucene/document/VectorField.java    |   57 +-
 .../apache/lucene/document/XYDocValuesField.java   |  141 +-
 .../document/XYDocValuesPointInGeometryQuery.java  |   44 +-
 .../lucene/document/XYPointDistanceComparator.java |   43 +-
 .../org/apache/lucene/document/XYPointField.java   |   89 +-
 .../lucene/document/XYPointInGeometryQuery.java    |   94 +-
 .../apache/lucene/document/XYPointSortField.java   |   12 +-
 .../java/org/apache/lucene/document/XYShape.java   |  105 +-
 .../org/apache/lucene/document/XYShapeQuery.java   |  177 +-
 .../org/apache/lucene/document/package-info.java   |   55 +-
 .../src/java/org/apache/lucene/geo/Circle.java     |   18 +-
 .../src/java/org/apache/lucene/geo/Circle2D.java   |  219 +-
 .../java/org/apache/lucene/geo/Component2D.java    |  215 +-
 .../java/org/apache/lucene/geo/ComponentTree.java  |  161 +-
 .../src/java/org/apache/lucene/geo/EdgeTree.java   |  264 +-
 .../org/apache/lucene/geo/GeoEncodingUtils.java    |  172 +-
 .../src/java/org/apache/lucene/geo/GeoUtils.java   |  133 +-
 .../java/org/apache/lucene/geo/LatLonGeometry.java |    4 +-
 .../core/src/java/org/apache/lucene/geo/Line.java  |   22 +-
 .../src/java/org/apache/lucene/geo/Line2D.java     |  113 +-
 .../core/src/java/org/apache/lucene/geo/Point.java |   16 +-
 .../src/java/org/apache/lucene/geo/Point2D.java    |   99 +-
 .../src/java/org/apache/lucene/geo/Polygon.java    |   69 +-
 .../src/java/org/apache/lucene/geo/Polygon2D.java  |  169 +-
 .../src/java/org/apache/lucene/geo/Rectangle.java  |   40 +-
 .../java/org/apache/lucene/geo/Rectangle2D.java    |  148 +-
 .../lucene/geo/SimpleGeoJSONPolygonParser.java     |   81 +-
 .../apache/lucene/geo/SimpleWKTShapeParser.java    |  112 +-
 .../java/org/apache/lucene/geo/Tessellator.java    |  782 ++--
 .../src/java/org/apache/lucene/geo/XYCircle.java   |   16 +-
 .../org/apache/lucene/geo/XYEncodingUtils.java     |   12 +-
 .../src/java/org/apache/lucene/geo/XYGeometry.java |    4 +-
 .../src/java/org/apache/lucene/geo/XYLine.java     |   18 +-
 .../src/java/org/apache/lucene/geo/XYPoint.java    |   16 +-
 .../src/java/org/apache/lucene/geo/XYPolygon.java  |   42 +-
 .../java/org/apache/lucene/geo/XYRectangle.java    |    7 +-
 .../java/org/apache/lucene/geo/package-info.java   |    6 +-
 .../apache/lucene/index/AutomatonTermsEnum.java    |  181 +-
 .../apache/lucene/index/BaseCompositeReader.java   |  110 +-
 .../org/apache/lucene/index/BaseTermsEnum.java     |   19 +-
 .../org/apache/lucene/index/BinaryDocValues.java   |   16 +-
 .../lucene/index/BinaryDocValuesFieldUpdates.java  |   34 +-
 .../apache/lucene/index/BinaryDocValuesWriter.java |   78 +-
 .../java/org/apache/lucene/index/BitsSlice.java    |    8 +-
 .../org/apache/lucene/index/BufferedUpdates.java   |   92 +-
 .../apache/lucene/index/BufferedUpdatesStream.java |  135 +-
 .../org/apache/lucene/index/ByteSliceReader.java   |   36 +-
 .../org/apache/lucene/index/ByteSliceWriter.java   |   22 +-
 .../java/org/apache/lucene/index/CheckIndex.java   | 2289 ++++++++----
 .../java/org/apache/lucene/index/CodecReader.java  |   98 +-
 .../org/apache/lucene/index/CompositeReader.java   |   84 +-
 .../lucene/index/CompositeReaderContext.java       |   65 +-
 .../lucene/index/ConcurrentMergeScheduler.java     |  321 +-
 .../apache/lucene/index/CorruptIndexException.java |   21 +-
 .../org/apache/lucene/index/DirectoryReader.java   |  342 +-
 .../java/org/apache/lucene/index/DocIDMerger.java  |   53 +-
 .../java/org/apache/lucene/index/DocValues.java    |  196 +-
 .../apache/lucene/index/DocValuesFieldUpdates.java |  171 +-
 .../org/apache/lucene/index/DocValuesIterator.java |   14 +-
 .../apache/lucene/index/DocValuesLeafReader.java   |    1 -
 .../org/apache/lucene/index/DocValuesType.java     |   45 +-
 .../org/apache/lucene/index/DocValuesUpdate.java   |   42 +-
 .../org/apache/lucene/index/DocValuesWriter.java   |    6 +-
 .../org/apache/lucene/index/DocsWithFieldSet.java  |   13 +-
 .../org/apache/lucene/index/DocumentsWriter.java   |  345 +-
 .../lucene/index/DocumentsWriterDeleteQueue.java   |  196 +-
 .../lucene/index/DocumentsWriterFlushControl.java  |  388 +-
 .../lucene/index/DocumentsWriterFlushQueue.java    |   32 +-
 .../lucene/index/DocumentsWriterPerThread.java     |  318 +-
 .../lucene/index/DocumentsWriterPerThreadPool.java |   74 +-
 .../lucene/index/DocumentsWriterStallControl.java  |   57 +-
 .../lucene/index/EmptyDocValuesProducer.java       |    8 +-
 .../lucene/index/ExitableDirectoryReader.java      |  464 +--
 .../java/org/apache/lucene/index/FieldInfo.java    |  368 +-
 .../java/org/apache/lucene/index/FieldInfos.java   |  603 +++-
 .../org/apache/lucene/index/FieldInvertState.java  |   82 +-
 .../org/apache/lucene/index/FieldTermIterator.java |   23 +-
 .../apache/lucene/index/FieldUpdatesBuffer.java    |  139 +-
 .../src/java/org/apache/lucene/index/Fields.java   |   31 +-
 .../apache/lucene/index/FilterBinaryDocValues.java |   13 +-
 .../org/apache/lucene/index/FilterCodecReader.java |   38 +-
 .../apache/lucene/index/FilterDirectoryReader.java |   47 +-
 .../org/apache/lucene/index/FilterLeafReader.java  |   71 +-
 .../org/apache/lucene/index/FilterMergePolicy.java |   30 +-
 .../lucene/index/FilterNumericDocValues.java       |   12 +-
 .../apache/lucene/index/FilterSortedDocValues.java |    7 +-
 .../lucene/index/FilterSortedNumericDocValues.java |    7 +-
 .../lucene/index/FilterSortedSetDocValues.java     |    7 +-
 .../org/apache/lucene/index/FilteredTermsEnum.java |  165 +-
 .../lucene/index/FlushByRamOrCountsPolicy.java     |  105 +-
 .../java/org/apache/lucene/index/FlushPolicy.java  |   91 +-
 .../org/apache/lucene/index/FreqProxFields.java    |   43 +-
 .../apache/lucene/index/FreqProxTermsWriter.java   |  104 +-
 .../lucene/index/FreqProxTermsWriterPerField.java  |   77 +-
 .../apache/lucene/index/FrozenBufferedUpdates.java |  205 +-
 .../src/java/org/apache/lucene/index/Impact.java   |   17 +-
 .../src/java/org/apache/lucene/index/Impacts.java  |   28 +-
 .../java/org/apache/lucene/index/ImpactsEnum.java  |    5 +-
 .../org/apache/lucene/index/ImpactsSource.java     |   34 +-
 .../java/org/apache/lucene/index/IndexCommit.java  |   96 +-
 .../apache/lucene/index/IndexDeletionPolicy.java   |  106 +-
 .../org/apache/lucene/index/IndexFileDeleter.java  |  357 +-
 .../org/apache/lucene/index/IndexFileNames.java    |  118 +-
 .../lucene/index/IndexFormatTooNewException.java   |   67 +-
 .../lucene/index/IndexFormatTooOldException.java   |  106 +-
 .../lucene/index/IndexNotFoundException.java       |    9 +-
 .../java/org/apache/lucene/index/IndexOptions.java |   36 +-
 .../java/org/apache/lucene/index/IndexReader.java  |  435 ++-
 .../apache/lucene/index/IndexReaderContext.java    |   43 +-
 .../java/org/apache/lucene/index/IndexSorter.java  |  225 +-
 .../org/apache/lucene/index/IndexUpgrader.java     |  133 +-
 .../java/org/apache/lucene/index/IndexWriter.java  | 3726 +++++++++++---------
 .../org/apache/lucene/index/IndexWriterConfig.java |  363 +-
 .../org/apache/lucene/index/IndexableField.java    |   37 +-
 .../apache/lucene/index/IndexableFieldType.java    |  108 +-
 .../org/apache/lucene/index/IndexingChain.java     |  519 ++-
 .../index/KeepOnlyLastCommitDeletionPolicy.java    |   22 +-
 .../org/apache/lucene/index/KnnGraphValues.java    |   45 +-
 .../java/org/apache/lucene/index/LeafMetaData.java |   24 +-
 .../java/org/apache/lucene/index/LeafReader.java   |  204 +-
 .../org/apache/lucene/index/LeafReaderContext.java |   30 +-
 .../apache/lucene/index/LiveIndexWriterConfig.java |  302 +-
 .../lucene/index/LogByteSizeMergePolicy.java       |  119 +-
 .../org/apache/lucene/index/LogDocMergePolicy.java |   40 +-
 .../org/apache/lucene/index/LogMergePolicy.java    |  383 +-
 .../org/apache/lucene/index/MappedMultiFields.java |   27 +-
 .../lucene/index/MappingMultiPostingsEnum.java     |   35 +-
 .../java/org/apache/lucene/index/MergePolicy.java  |  559 ++-
 .../org/apache/lucene/index/MergeRateLimiter.java  |   47 +-
 .../apache/lucene/index/MergeReaderWrapper.java    |   21 +-
 .../org/apache/lucene/index/MergeScheduler.java    |   62 +-
 .../java/org/apache/lucene/index/MergeState.java   |   93 +-
 .../java/org/apache/lucene/index/MergeTrigger.java |   34 +-
 .../java/org/apache/lucene/index/MultiBits.java    |   49 +-
 .../org/apache/lucene/index/MultiDocValues.java    |  261 +-
 .../java/org/apache/lucene/index/MultiFields.java  |   41 +-
 .../org/apache/lucene/index/MultiLeafReader.java   |    8 +-
 .../org/apache/lucene/index/MultiPostingsEnum.java |   55 +-
 .../java/org/apache/lucene/index/MultiReader.java  |   55 +-
 .../java/org/apache/lucene/index/MultiSorter.java  |   85 +-
 .../java/org/apache/lucene/index/MultiTerms.java   |   86 +-
 .../org/apache/lucene/index/MultiTermsEnum.java    |   80 +-
 .../org/apache/lucene/index/NoDeletionPolicy.java  |   10 +-
 .../org/apache/lucene/index/NoMergePolicy.java     |   42 +-
 .../org/apache/lucene/index/NoMergeScheduler.java  |   17 +-
 .../org/apache/lucene/index/NormValuesWriter.java  |   79 +-
 .../org/apache/lucene/index/NumericDocValues.java  |   16 +-
 .../lucene/index/NumericDocValuesFieldUpdates.java |   25 +-
 .../lucene/index/NumericDocValuesWriter.java       |   54 +-
 .../lucene/index/OneMergeWrappingMergePolicy.java  |   23 +-
 .../java/org/apache/lucene/index/OrdTermState.java |   11 +-
 .../java/org/apache/lucene/index/OrdinalMap.java   |  157 +-
 .../lucene/index/ParallelCompositeReader.java      |  101 +-
 .../apache/lucene/index/ParallelLeafReader.java    |  151 +-
 .../apache/lucene/index/ParallelPostingsArray.java |    2 +-
 .../org/apache/lucene/index/PendingDeletes.java    |  116 +-
 .../apache/lucene/index/PendingSoftDeletes.java    |   72 +-
 .../index/PersistentSnapshotDeletionPolicy.java    |  133 +-
 .../java/org/apache/lucene/index/PointValues.java  |  199 +-
 .../org/apache/lucene/index/PointValuesWriter.java |  268 +-
 .../java/org/apache/lucene/index/PostingsEnum.java |   88 +-
 .../org/apache/lucene/index/PrefixCodedTerms.java  |   27 +-
 .../java/org/apache/lucene/index/QueryTimeout.java |   17 +-
 .../org/apache/lucene/index/QueryTimeoutImpl.java  |   37 +-
 .../lucene/index/RandomAccessVectorValues.java     |   25 +-
 .../index/RandomAccessVectorValuesProducer.java    |    7 +-
 .../org/apache/lucene/index/ReaderManager.java     |   69 +-
 .../java/org/apache/lucene/index/ReaderPool.java   |  173 +-
 .../java/org/apache/lucene/index/ReaderSlice.java  |    3 +-
 .../java/org/apache/lucene/index/ReaderUtil.java   |   34 +-
 .../org/apache/lucene/index/ReadersAndUpdates.java |  383 +-
 .../org/apache/lucene/index/SegmentCommitInfo.java |  182 +-
 .../apache/lucene/index/SegmentCoreReaders.java    |  122 +-
 .../org/apache/lucene/index/SegmentDocValues.java  |   36 +-
 .../lucene/index/SegmentDocValuesProducer.java     |   32 +-
 .../java/org/apache/lucene/index/SegmentInfo.java  |  152 +-
 .../java/org/apache/lucene/index/SegmentInfos.java |  627 ++--
 .../org/apache/lucene/index/SegmentMerger.java     |  100 +-
 .../org/apache/lucene/index/SegmentReadState.java  |   41 +-
 .../org/apache/lucene/index/SegmentReader.java     |  157 +-
 .../org/apache/lucene/index/SegmentWriteState.java |   81 +-
 .../apache/lucene/index/SerialMergeScheduler.java  |   18 +-
 .../lucene/index/SimpleMergedSegmentWarmer.java    |   40 +-
 .../org/apache/lucene/index/SingleTermsEnum.java   |   19 +-
 .../index/SingletonSortedNumericDocValues.java     |   15 +-
 .../lucene/index/SingletonSortedSetDocValues.java  |   12 +-
 .../lucene/index/SlowCodecReaderWrapper.java       |   43 +-
 .../org/apache/lucene/index/SlowImpactsEnum.java   |   39 +-
 .../lucene/index/SnapshotDeletionPolicy.java       |  103 +-
 .../index/SoftDeletesDirectoryReaderWrapper.java   |  120 +-
 .../index/SoftDeletesRetentionMergePolicy.java     |  102 +-
 .../org/apache/lucene/index/SortFieldProvider.java |   55 +-
 .../org/apache/lucene/index/SortedDocValues.java   |   73 +-
 .../lucene/index/SortedDocValuesTermsEnum.java     |   11 +-
 .../apache/lucene/index/SortedDocValuesWriter.java |  109 +-
 .../lucene/index/SortedNumericDocValues.java       |   27 +-
 .../lucene/index/SortedNumericDocValuesWriter.java |   93 +-
 .../apache/lucene/index/SortedSetDocValues.java    |   75 +-
 .../lucene/index/SortedSetDocValuesTermsEnum.java  |   13 +-
 .../lucene/index/SortedSetDocValuesWriter.java     |  136 +-
 .../src/java/org/apache/lucene/index/Sorter.java   |  102 +-
 .../apache/lucene/index/SortingCodecReader.java    |  160 +-
 .../lucene/index/SortingStoredFieldsConsumer.java  |   87 +-
 .../lucene/index/SortingTermVectorsConsumer.java   |   69 +-
 .../lucene/index/StandardDirectoryReader.java      |  208 +-
 .../apache/lucene/index/StoredFieldVisitor.java    |   74 +-
 .../apache/lucene/index/StoredFieldsConsumer.java  |    7 +-
 .../src/java/org/apache/lucene/index/Term.java     |  144 +-
 .../java/org/apache/lucene/index/TermState.java    |   20 +-
 .../java/org/apache/lucene/index/TermStates.java   |  132 +-
 .../apache/lucene/index/TermVectorsConsumer.java   |   31 +-
 .../lucene/index/TermVectorsConsumerPerField.java  |   99 +-
 .../src/java/org/apache/lucene/index/Terms.java    |  125 +-
 .../java/org/apache/lucene/index/TermsEnum.java    |  313 +-
 .../java/org/apache/lucene/index/TermsHash.java    |   34 +-
 .../org/apache/lucene/index/TermsHashPerField.java |  104 +-
 .../org/apache/lucene/index/TieredMergePolicy.java |  500 ++-
 .../index/TrackingTmpOutputDirectoryWrapper.java   |    3 +-
 .../org/apache/lucene/index/TwoPhaseCommit.java    |   29 +-
 .../apache/lucene/index/TwoPhaseCommitTool.java    |   54 +-
 .../lucene/index/UpgradeIndexMergePolicy.java      |  108 +-
 .../java/org/apache/lucene/index/VectorValues.java |  165 +-
 .../apache/lucene/index/VectorValuesWriter.java    |   49 +-
 .../java/org/apache/lucene/index/package-info.java |  365 +-
 .../src/java/org/apache/lucene/package-info.java   |    6 +-
 .../org/apache/lucene/search/AutomatonQuery.java   |  101 +-
 .../org/apache/lucene/search/BlendedTermQuery.java |  139 +-
 .../lucene/search/BlockMaxConjunctionScorer.java   |   20 +-
 .../org/apache/lucene/search/BlockMaxDISI.java     |   10 +-
 .../lucene/search/Boolean2ScorerSupplier.java      |  104 +-
 .../org/apache/lucene/search/BooleanClause.java    |   75 +-
 .../org/apache/lucene/search/BooleanQuery.java     |  199 +-
 .../org/apache/lucene/search/BooleanScorer.java    |   74 +-
 .../org/apache/lucene/search/BooleanWeight.java    |  102 +-
 .../org/apache/lucene/search/BoostAttribute.java   |   22 +-
 .../apache/lucene/search/BoostAttributeImpl.java   |    9 +-
 .../java/org/apache/lucene/search/BoostQuery.java  |   39 +-
 .../java/org/apache/lucene/search/BulkScorer.java  |   71 +-
 .../org/apache/lucene/search/CachingCollector.java |  128 +-
 .../apache/lucene/search/CollectionStatistics.java |  124 +-
 .../search/CollectionTerminatedException.java      |   16 +-
 .../java/org/apache/lucene/search/Collector.java   |   61 +-
 .../org/apache/lucene/search/CollectorManager.java |   35 +-
 .../org/apache/lucene/search/ConjunctionDISI.java  |  156 +-
 .../apache/lucene/search/ConjunctionScorer.java    |    9 +-
 .../apache/lucene/search/ConstantScoreQuery.java   |   49 +-
 .../apache/lucene/search/ConstantScoreScorer.java  |   65 +-
 .../apache/lucene/search/ConstantScoreWeight.java  |   12 +-
 .../search/ControlledRealTimeReopenThread.java     |  113 +-
 .../apache/lucene/search/DisiPriorityQueue.java    |   15 +-
 .../java/org/apache/lucene/search/DisiWrapper.java |    7 +-
 .../search/DisjunctionDISIApproximation.java       |    9 +-
 .../lucene/search/DisjunctionMatchesIterator.java  |   68 +-
 .../apache/lucene/search/DisjunctionMaxQuery.java  |  123 +-
 .../apache/lucene/search/DisjunctionMaxScorer.java |   26 +-
 .../DisjunctionScoreBlockBoundaryPropagator.java   |   40 +-
 .../apache/lucene/search/DisjunctionScorer.java    |   44 +-
 .../apache/lucene/search/DisjunctionSumScorer.java |   14 +-
 .../java/org/apache/lucene/search/DocIdSet.java    |   73 +-
 .../org/apache/lucene/search/DocIdSetIterator.java |  112 +-
 .../lucene/search/DocValuesFieldExistsQuery.java   |   20 +-
 .../lucene/search/DocValuesRewriteMethod.java      |  160 +-
 .../org/apache/lucene/search/DoubleValues.java     |   38 +-
 .../apache/lucene/search/DoubleValuesSource.java   |  237 +-
 .../apache/lucene/search/ExactPhraseMatcher.java   |   68 +-
 .../java/org/apache/lucene/search/Explanation.java |   63 +-
 .../org/apache/lucene/search/FieldComparator.java  |  250 +-
 .../lucene/search/FieldComparatorSource.java       |   11 +-
 .../java/org/apache/lucene/search/FieldDoc.java    |   35 +-
 .../apache/lucene/search/FieldValueHitQueue.java   |   81 +-
 .../org/apache/lucene/search/FilterCollector.java  |    2 -
 .../apache/lucene/search/FilterLeafCollector.java  |    2 -
 .../lucene/search/FilterMatchesIterator.java       |    9 +-
 .../org/apache/lucene/search/FilterScorable.java   |   11 +-
 .../org/apache/lucene/search/FilterScorer.java     |   22 +-
 .../org/apache/lucene/search/FilterWeight.java     |   25 +-
 .../lucene/search/FilteredDocIdSetIterator.java    |   12 +-
 .../lucene/search/FuzzyAutomatonBuilder.java       |   14 +-
 .../java/org/apache/lucene/search/FuzzyQuery.java  |  170 +-
 .../org/apache/lucene/search/FuzzyTermsEnum.java   |  171 +-
 .../java/org/apache/lucene/search/HitQueue.java    |   65 +-
 .../apache/lucene/search/HitsThresholdChecker.java |   23 +-
 .../java/org/apache/lucene/search/ImpactsDISI.java |   25 +-
 .../lucene/search/IndexOrDocValuesQuery.java       |   54 +-
 .../org/apache/lucene/search/IndexSearcher.java    |  688 ++--
 .../org/apache/lucene/search/LRUQueryCache.java    |  279 +-
 .../org/apache/lucene/search/LeafCollector.java    |   74 +-
 .../apache/lucene/search/LeafFieldComparator.java  |  119 +-
 .../org/apache/lucene/search/LeafSimScorer.java    |   31 +-
 .../org/apache/lucene/search/LiveFieldValues.java  |   50 +-
 .../java/org/apache/lucene/search/LongValues.java  |   10 +-
 .../org/apache/lucene/search/LongValuesSource.java |   65 +-
 .../apache/lucene/search/MatchAllDocsQuery.java    |   14 +-
 .../org/apache/lucene/search/MatchNoDocsQuery.java |   12 +-
 .../src/java/org/apache/lucene/search/Matches.java |   19 +-
 .../org/apache/lucene/search/MatchesIterator.java  |   36 +-
 .../org/apache/lucene/search/MatchesUtils.java     |   73 +-
 .../search/MaxNonCompetitiveBoostAttribute.java    |   34 +-
 .../MaxNonCompetitiveBoostAttributeImpl.java       |   17 +-
 .../apache/lucene/search/MaxScoreAccumulator.java  |   14 +-
 .../org/apache/lucene/search/MaxScoreCache.java    |   30 +-
 .../lucene/search/MaxScoreSumPropagator.java       |   22 +-
 .../lucene/search/MinShouldMatchSumScorer.java     |  182 +-
 .../org/apache/lucene/search/MultiCollector.java   |   69 +-
 .../lucene/search/MultiCollectorManager.java       |   30 +-
 .../lucene/search/MultiLeafFieldComparator.java    |    7 +-
 .../org/apache/lucene/search/MultiPhraseQuery.java |  193 +-
 .../org/apache/lucene/search/MultiTermQuery.java   |  317 +-
 .../search/MultiTermQueryConstantScoreWrapper.java |   66 +-
 .../java/org/apache/lucene/search/Multiset.java    |   10 +-
 .../org/apache/lucene/search/NGramPhraseQuery.java |   32 +-
 .../org/apache/lucene/search/NamedMatches.java     |   32 +-
 .../lucene/search/NormsFieldExistsQuery.java       |   16 +-
 .../org/apache/lucene/search/PhraseMatcher.java    |   47 +-
 .../org/apache/lucene/search/PhrasePositions.java  |   41 +-
 .../java/org/apache/lucene/search/PhraseQuery.java |  244 +-
 .../java/org/apache/lucene/search/PhraseQueue.java |    5 +-
 .../org/apache/lucene/search/PhraseScorer.java     |    1 -
 .../org/apache/lucene/search/PhraseWeight.java     |  142 +-
 .../org/apache/lucene/search/PointInSetQuery.java  |  167 +-
 .../org/apache/lucene/search/PointRangeQuery.java  |  184 +-
 .../lucene/search/PositiveScoresOnlyCollector.java |   12 +-
 .../java/org/apache/lucene/search/PrefixQuery.java |   23 +-
 .../src/java/org/apache/lucene/search/Query.java   |  109 +-
 .../java/org/apache/lucene/search/QueryCache.java  |    9 +-
 .../apache/lucene/search/QueryCachingPolicy.java   |   23 +-
 .../org/apache/lucene/search/QueryRescorer.java    |  101 +-
 .../org/apache/lucene/search/QueryVisitor.java     |   35 +-
 .../lucene/search/QueueSizeBasedExecutor.java      |   10 +-
 .../org/apache/lucene/search/ReferenceManager.java |  215 +-
 .../java/org/apache/lucene/search/RegexpQuery.java |  117 +-
 .../apache/lucene/search/ReqExclBulkScorer.java    |    3 -
 .../org/apache/lucene/search/ReqExclScorer.java    |   33 +-
 .../org/apache/lucene/search/ReqOptSumScorer.java  |  269 +-
 .../java/org/apache/lucene/search/Rescorer.java    |   42 +-
 .../java/org/apache/lucene/search/Scorable.java    |   47 +-
 .../java/org/apache/lucene/search/ScoreAndDoc.java |    7 +-
 .../lucene/search/ScoreCachingWrappingScorer.java  |   17 +-
 .../java/org/apache/lucene/search/ScoreDoc.java    |   13 +-
 .../java/org/apache/lucene/search/ScoreMode.java   |   36 +-
 .../src/java/org/apache/lucene/search/Scorer.java  |   72 +-
 .../org/apache/lucene/search/ScorerSupplier.java   |   25 +-
 .../org/apache/lucene/search/ScoringRewrite.java   |  181 +-
 .../org/apache/lucene/search/SearcherFactory.java  |   35 +-
 .../lucene/search/SearcherLifetimeManager.java     |  168 +-
 .../org/apache/lucene/search/SearcherManager.java  |  153 +-
 .../org/apache/lucene/search/SegmentCacheable.java |   19 +-
 .../org/apache/lucene/search/SimpleCollector.java  |    3 -
 .../lucene/search/SimpleFieldComparator.java       |    5 +-
 .../org/apache/lucene/search/SliceExecutor.java    |   11 +-
 .../apache/lucene/search/SloppyPhraseMatcher.java  |  332 +-
 .../src/java/org/apache/lucene/search/Sort.java    |  146 +-
 .../java/org/apache/lucene/search/SortField.java   |  365 +-
 .../org/apache/lucene/search/SortRescorer.java     |   29 +-
 .../lucene/search/SortedNumericSelector.java       |   65 +-
 .../lucene/search/SortedNumericSortField.java      |  134 +-
 .../apache/lucene/search/SortedSetSelector.java    |   98 +-
 .../apache/lucene/search/SortedSetSortField.java   |   81 +-
 .../org/apache/lucene/search/SynonymQuery.java     |  161 +-
 .../lucene/search/TermCollectingRewrite.java       |   37 +-
 .../org/apache/lucene/search/TermInSetQuery.java   |   68 +-
 .../apache/lucene/search/TermMatchesIterator.java  |    9 +-
 .../java/org/apache/lucene/search/TermQuery.java   |   94 +-
 .../org/apache/lucene/search/TermRangeQuery.java   |  130 +-
 .../java/org/apache/lucene/search/TermScorer.java  |   19 +-
 .../org/apache/lucene/search/TermStatistics.java   |   92 +-
 .../lucene/search/TimeLimitingCollector.java       |  176 +-
 .../src/java/org/apache/lucene/search/TopDocs.java |  136 +-
 .../org/apache/lucene/search/TopDocsCollector.java |  123 +-
 .../apache/lucene/search/TopFieldCollector.java    |  214 +-
 .../org/apache/lucene/search/TopFieldDocs.java     |   24 +-
 .../apache/lucene/search/TopScoreDocCollector.java |  111 +-
 .../org/apache/lucene/search/TopTermsRewrite.java  |  244 +-
 .../lucene/search/TotalHitCountCollector.java      |    6 +-
 .../java/org/apache/lucene/search/TotalHits.java   |   32 +-
 .../org/apache/lucene/search/TwoPhaseIterator.java |   51 +-
 .../search/UsageTrackingQueryCachingPolicy.java    |   49 +-
 .../java/org/apache/lucene/search/WANDScorer.java  |  147 +-
 .../src/java/org/apache/lucene/search/Weight.java  |  177 +-
 .../org/apache/lucene/search/WildcardQuery.java    |   59 +-
 .../lucene/search/comparators/DocComparator.java   |   25 +-
 .../search/comparators/DoubleComparator.java       |   15 +-
 .../lucene/search/comparators/FloatComparator.java |   15 +-
 .../lucene/search/comparators/IntComparator.java   |   15 +-
 .../lucene/search/comparators/LongComparator.java  |   15 +-
 .../lucene/search/comparators/MinDocIterator.java  |    7 +-
 .../search/comparators/NumericComparator.java      |  171 +-
 .../lucene/search/comparators/package-info.java    |    5 +-
 .../org/apache/lucene/search/package-info.java     |  839 +++--
 .../lucene/search/similarities/AfterEffect.java    |   31 +-
 .../lucene/search/similarities/AfterEffectB.java   |   17 +-
 .../lucene/search/similarities/AfterEffectL.java   |    8 +-
 .../lucene/search/similarities/Axiomatic.java      |  160 +-
 .../lucene/search/similarities/AxiomaticF1EXP.java |   82 +-
 .../lucene/search/similarities/AxiomaticF1LOG.java |   76 +-
 .../lucene/search/similarities/AxiomaticF2EXP.java |   85 +-
 .../lucene/search/similarities/AxiomaticF2LOG.java |   80 +-
 .../lucene/search/similarities/AxiomaticF3EXP.java |   80 +-
 .../lucene/search/similarities/AxiomaticF3LOG.java |   73 +-
 .../lucene/search/similarities/BM25Similarity.java |  140 +-
 .../lucene/search/similarities/BasicModel.java     |   37 +-
 .../lucene/search/similarities/BasicModelG.java    |   34 +-
 .../lucene/search/similarities/BasicModelIF.java   |   23 +-
 .../lucene/search/similarities/BasicModelIn.java   |   18 +-
 .../lucene/search/similarities/BasicModelIne.java  |   27 +-
 .../lucene/search/similarities/BasicStats.java     |   32 +-
 .../search/similarities/BooleanSimilarity.java     |   18 +-
 .../search/similarities/ClassicSimilarity.java     |   23 +-
 .../lucene/search/similarities/DFISimilarity.java  |   85 +-
 .../lucene/search/similarities/DFRSimilarity.java  |  146 +-
 .../lucene/search/similarities/Distribution.java   |   30 +-
 .../lucene/search/similarities/DistributionLL.java |   10 +-
 .../search/similarities/DistributionSPL.java       |   19 +-
 .../lucene/search/similarities/IBSimilarity.java   |  149 +-
 .../lucene/search/similarities/Independence.java   |   21 +-
 .../similarities/IndependenceChiSquared.java       |   15 +-
 .../search/similarities/IndependenceSaturated.java |   13 +-
 .../similarities/IndependenceStandardized.java     |   16 +-
 .../search/similarities/LMDirichletSimilarity.java |   93 +-
 .../similarities/LMJelinekMercerSimilarity.java    |   70 +-
 .../lucene/search/similarities/LMSimilarity.java   |  101 +-
 .../apache/lucene/search/similarities/Lambda.java  |   21 +-
 .../lucene/search/similarities/LambdaDF.java       |   17 +-
 .../lucene/search/similarities/LambdaTTF.java      |   18 +-
 .../search/similarities/MultiSimilarity.java       |   25 +-
 .../lucene/search/similarities/Normalization.java  |   52 +-
 .../search/similarities/NormalizationH1.java       |   47 +-
 .../search/similarities/NormalizationH2.java       |   48 +-
 .../search/similarities/NormalizationH3.java       |   33 +-
 .../lucene/search/similarities/NormalizationZ.java |   28 +-
 .../similarities/PerFieldSimilarityWrapper.java    |   26 +-
 .../lucene/search/similarities/Similarity.java     |  205 +-
 .../lucene/search/similarities/SimilarityBase.java |  151 +-
 .../search/similarities/TFIDFSimilarity.java       |  485 ++-
 .../lucene/search/similarities/package-info.java   |  191 +-
 .../lucene/search/spans/ConjunctionSpans.java      |   24 +-
 .../apache/lucene/search/spans/ContainSpans.java   |   18 +-
 .../lucene/search/spans/FieldMaskingSpanQuery.java |   68 +-
 .../apache/lucene/search/spans/FilterSpans.java    |   57 +-
 .../lucene/search/spans/NearSpansOrdered.java      |   46 +-
 .../lucene/search/spans/NearSpansUnordered.java    |   40 +-
 .../apache/lucene/search/spans/SpanBoostQuery.java |   33 +-
 .../apache/lucene/search/spans/SpanCollector.java  |   16 +-
 .../lucene/search/spans/SpanContainQuery.java      |   35 +-
 .../lucene/search/spans/SpanContainingQuery.java   |   35 +-
 .../apache/lucene/search/spans/SpanFirstQuery.java |   27 +-
 .../search/spans/SpanMultiTermQueryWrapper.java    |  200 +-
 .../apache/lucene/search/spans/SpanNearQuery.java  |  131 +-
 .../apache/lucene/search/spans/SpanNotQuery.java   |  107 +-
 .../apache/lucene/search/spans/SpanOrQuery.java    |   59 +-
 .../search/spans/SpanPositionCheckQuery.java       |   63 +-
 .../lucene/search/spans/SpanPositionQueue.java     |    8 +-
 .../search/spans/SpanPositionRangeQuery.java       |   33 +-
 .../org/apache/lucene/search/spans/SpanQuery.java  |   11 +-
 .../org/apache/lucene/search/spans/SpanScorer.java |   39 +-
 .../apache/lucene/search/spans/SpanTermQuery.java  |   98 +-
 .../org/apache/lucene/search/spans/SpanWeight.java |  242 +-
 .../lucene/search/spans/SpanWithinQuery.java       |   36 +-
 .../java/org/apache/lucene/search/spans/Spans.java |   62 +-
 .../org/apache/lucene/search/spans/TermSpans.java  |   28 +-
 .../apache/lucene/search/spans/package-info.java   |   97 +-
 .../lucene/store/AlreadyClosedException.java       |    7 +-
 .../org/apache/lucene/store/BaseDirectory.java     |    9 +-
 .../org/apache/lucene/store/BufferedChecksum.java  |   18 +-
 .../lucene/store/BufferedChecksumIndexInput.java   |    9 +-
 .../apache/lucene/store/BufferedIndexInput.java    |  146 +-
 .../apache/lucene/store/ByteArrayDataInput.java    |   41 +-
 .../apache/lucene/store/ByteArrayDataOutput.java   |   11 +-
 .../org/apache/lucene/store/ByteBufferGuard.java   |   61 +-
 .../apache/lucene/store/ByteBufferIndexInput.java  |  162 +-
 .../apache/lucene/store/ByteBuffersDataInput.java  |   93 +-
 .../apache/lucene/store/ByteBuffersDataOutput.java |  232 +-
 .../apache/lucene/store/ByteBuffersDirectory.java  |  141 +-
 .../apache/lucene/store/ByteBuffersIndexInput.java |   27 +-
 .../lucene/store/ByteBuffersIndexOutput.java       |   25 +-
 .../apache/lucene/store/ChecksumIndexInput.java    |   25 +-
 .../java/org/apache/lucene/store/DataInput.java    |  153 +-
 .../java/org/apache/lucene/store/DataOutput.java   |  174 +-
 .../java/org/apache/lucene/store/Directory.java    |  106 +-
 .../java/org/apache/lucene/store/FSDirectory.java  |  260 +-
 .../org/apache/lucene/store/FSLockFactory.java     |   26 +-
 .../apache/lucene/store/FileSwitchDirectory.java   |   60 +-
 .../org/apache/lucene/store/FilterDirectory.java   |   35 +-
 .../java/org/apache/lucene/store/FlushInfo.java    |   45 +-
 .../java/org/apache/lucene/store/IOContext.java    |   78 +-
 .../java/org/apache/lucene/store/IndexInput.java   |  101 +-
 .../java/org/apache/lucene/store/IndexOutput.java  |   26 +-
 .../apache/lucene/store/InputStreamDataInput.java  |   13 +-
 .../src/java/org/apache/lucene/store/Lock.java     |   42 +-
 .../java/org/apache/lucene/store/LockFactory.java  |   35 +-
 .../lucene/store/LockObtainFailedException.java    |    9 +-
 .../lucene/store/LockReleaseFailedException.java   |    6 +-
 .../org/apache/lucene/store/LockStressTest.java    |   78 +-
 .../store/LockValidatingDirectoryWrapper.java      |   10 +-
 .../org/apache/lucene/store/LockVerifyServer.java  |  138 +-
 .../org/apache/lucene/store/MMapDirectory.java     |  355 +-
 .../java/org/apache/lucene/store/MergeInfo.java    |   64 +-
 .../org/apache/lucene/store/NIOFSDirectory.java    |  127 +-
 .../apache/lucene/store/NRTCachingDirectory.java   |  107 +-
 .../apache/lucene/store/NativeFSLockFactory.java   |   99 +-
 .../org/apache/lucene/store/NoLockFactory.java     |   18 +-
 .../lucene/store/OutputStreamDataOutput.java       |   11 +-
 .../lucene/store/OutputStreamIndexOutput.java      |   13 +-
 .../org/apache/lucene/store/RandomAccessInput.java |   22 +-
 .../lucene/store/RateLimitedIndexOutput.java       |   20 +-
 .../java/org/apache/lucene/store/RateLimiter.java  |   78 +-
 .../apache/lucene/store/SimpleFSLockFactory.java   |   75 +-
 .../lucene/store/SingleInstanceLockFactory.java    |   13 +-
 .../apache/lucene/store/SleepingLockWrapper.java   |   60 +-
 .../lucene/store/TrackingDirectoryWrapper.java     |    7 +-
 .../apache/lucene/store/VerifyingLockFactory.java  |   17 +-
 .../java/org/apache/lucene/store/package-info.java |    4 +-
 .../java/org/apache/lucene/util/Accountable.java   |   14 +-
 .../java/org/apache/lucene/util/Accountables.java  |  115 +-
 .../lucene/util/ArrayInPlaceMergeSorter.java       |    3 +-
 .../org/apache/lucene/util/ArrayIntroSorter.java   |    3 +-
 .../org/apache/lucene/util/ArrayTimSorter.java     |    3 +-
 .../src/java/org/apache/lucene/util/ArrayUtil.java |  400 ++-
 .../src/java/org/apache/lucene/util/Attribute.java |    8 +-
 .../org/apache/lucene/util/AttributeFactory.java   |  122 +-
 .../java/org/apache/lucene/util/AttributeImpl.java |   89 +-
 .../org/apache/lucene/util/AttributeReflector.java |   11 +-
 .../org/apache/lucene/util/AttributeSource.java    |  354 +-
 .../java/org/apache/lucene/util/BitDocIdSet.java   |   14 +-
 .../src/java/org/apache/lucene/util/BitSet.java    |   45 +-
 .../org/apache/lucene/util/BitSetIterator.java     |   13 +-
 .../src/java/org/apache/lucene/util/BitUtil.java   |  107 +-
 .../core/src/java/org/apache/lucene/util/Bits.java |   29 +-
 .../java/org/apache/lucene/util/ByteBlockPool.java |  251 +-
 .../src/java/org/apache/lucene/util/BytesRef.java  |  137 +-
 .../java/org/apache/lucene/util/BytesRefArray.java |  106 +-
 .../org/apache/lucene/util/BytesRefBuilder.java    |   62 +-
 .../org/apache/lucene/util/BytesRefComparator.java |   27 +-
 .../java/org/apache/lucene/util/BytesRefHash.java  |  245 +-
 .../org/apache/lucene/util/BytesRefIterator.java   |   22 +-
 .../src/java/org/apache/lucene/util/CharsRef.java  |  115 +-
 .../org/apache/lucene/util/CharsRefBuilder.java    |   35 +-
 .../org/apache/lucene/util/ClassLoaderUtils.java   |   13 +-
 .../lucene/util/ClasspathResourceLoader.java       |   44 +-
 .../apache/lucene/util/CloseableThreadLocal.java   |   56 +-
 .../org/apache/lucene/util/CollectionUtil.java     |   54 +-
 .../org/apache/lucene/util/CommandLineUtil.java    |   75 +-
 .../src/java/org/apache/lucene/util/Constants.java |   45 +-
 .../src/java/org/apache/lucene/util/Counter.java   |   29 +-
 .../org/apache/lucene/util/DocIdSetBuilder.java    |   59 +-
 .../org/apache/lucene/util/FilterIterator.java     |   20 +-
 .../java/org/apache/lucene/util/FixedBitSet.java   |  205 +-
 .../src/java/org/apache/lucene/util/FixedBits.java |    9 +-
 .../lucene/util/FixedLengthBytesRefArray.java      |   65 +-
 .../lucene/util/FrequencyTrackingRingBuffer.java   |   57 +-
 .../java/org/apache/lucene/util/IOSupplier.java    |    3 +-
 .../src/java/org/apache/lucene/util/IOUtils.java   |  356 +-
 .../org/apache/lucene/util/InPlaceMergeSorter.java |   12 +-
 .../java/org/apache/lucene/util/InfoStream.java    |   54 +-
 .../org/apache/lucene/util/IntArrayDocIdSet.java   |   11 +-
 .../java/org/apache/lucene/util/IntBlockPool.java  |  218 +-
 .../java/org/apache/lucene/util/IntroSelector.java |   53 +-
 .../java/org/apache/lucene/util/IntroSorter.java   |   13 +-
 .../src/java/org/apache/lucene/util/IntsRef.java   |   89 +-
 .../org/apache/lucene/util/IntsRefBuilder.java     |   25 +-
 .../org/apache/lucene/util/LSBRadixSorter.java     |   12 +-
 .../java/org/apache/lucene/util/LongBitSet.java    |  197 +-
 .../src/java/org/apache/lucene/util/LongHeap.java  |   73 +-
 .../java/org/apache/lucene/util/LongValues.java    |   38 +-
 .../src/java/org/apache/lucene/util/LongsRef.java  |   92 +-
 .../org/apache/lucene/util/MSBRadixSorter.java     |   62 +-
 .../src/java/org/apache/lucene/util/MapOfSets.java |   30 +-
 .../src/java/org/apache/lucene/util/MathUtil.java  |   83 +-
 .../org/apache/lucene/util/MergedIterator.java     |   60 +-
 .../org/apache/lucene/util/NamedSPILoader.java     |   75 +-
 .../org/apache/lucene/util/NamedThreadFactory.java |   33 +-
 .../java/org/apache/lucene/util/NotDocIdSet.java   |   13 +-
 .../java/org/apache/lucene/util/NumericUtils.java  |  157 +-
 .../java/org/apache/lucene/util/OfflineSorter.java |  306 +-
 .../java/org/apache/lucene/util/PagedBytes.java    |  127 +-
 .../apache/lucene/util/PrintStreamInfoStream.java  |   29 +-
 .../java/org/apache/lucene/util/PriorityQueue.java |  125 +-
 .../java/org/apache/lucene/util/QueryBuilder.java  |  347 +-
 .../java/org/apache/lucene/util/RadixSelector.java |   63 +-
 .../org/apache/lucene/util/RamUsageEstimator.java  |  297 +-
 .../lucene/util/RecyclingByteBlockAllocator.java   |   68 +-
 .../lucene/util/RecyclingIntBlockAllocator.java    |   70 +-
 .../src/java/org/apache/lucene/util/RefCount.java  |   37 +-
 .../org/apache/lucene/util/ResourceLoader.java     |   24 +-
 .../apache/lucene/util/ResourceLoaderAware.java    |   10 +-
 .../org/apache/lucene/util/RoaringDocIdSet.java    |   41 +-
 .../java/org/apache/lucene/util/RollingBuffer.java |   63 +-
 .../lucene/util/SameThreadExecutorService.java     |    6 +-
 .../src/java/org/apache/lucene/util/Selector.java  |   17 +-
 .../org/apache/lucene/util/SentinelIntSet.java     |   74 +-
 .../src/java/org/apache/lucene/util/SetOnce.java   |   29 +-
 .../java/org/apache/lucene/util/SloppyMath.java    |  226 +-
 .../java/org/apache/lucene/util/SmallFloat.java    |   76 +-
 .../src/java/org/apache/lucene/util/Sorter.java    |   55 +-
 .../org/apache/lucene/util/SparseFixedBitSet.java  |   58 +-
 .../apache/lucene/util/StrictStringTokenizer.java  |   13 +-
 .../java/org/apache/lucene/util/StringHelper.java  |  171 +-
 .../apache/lucene/util/StringMSBRadixSorter.java   |    3 +-
 .../org/apache/lucene/util/SuppressForbidden.java  |    6 +-
 .../lucene/util/ThreadInterruptedException.java    |    6 +-
 .../src/java/org/apache/lucene/util/TimSorter.java |   67 +-
 .../java/org/apache/lucene/util/ToStringUtils.java |   11 +-
 .../java/org/apache/lucene/util/UnicodeUtil.java   |  419 +--
 .../java/org/apache/lucene/util/VectorUtil.java    |   27 +-
 .../src/java/org/apache/lucene/util/Version.java   |  196 +-
 .../java/org/apache/lucene/util/VirtualMethod.java |  119 +-
 .../org/apache/lucene/util/WeakIdentityMap.java    |  164 +-
 .../org/apache/lucene/util/automaton/Automata.java |  215 +-
 .../apache/lucene/util/automaton/Automaton.java    |  709 ++--
 .../lucene/util/automaton/AutomatonProvider.java   |   15 +-
 .../lucene/util/automaton/ByteRunAutomaton.java    |   10 +-
 .../util/automaton/CharacterRunAutomaton.java      |   30 +-
 .../lucene/util/automaton/CompiledAutomaton.java   |  239 +-
 .../automaton/DaciukMihovAutomatonBuilder.java     |  224 +-
 .../util/automaton/FiniteStringsIterator.java      |   73 +-
 .../org/apache/lucene/util/automaton/IntSet.java   |   46 +-
 .../util/automaton/Lev1ParametricDescription.java  |   75 +-
 .../util/automaton/Lev1TParametricDescription.java |   82 +-
 .../util/automaton/Lev2ParametricDescription.java  |  225 +-
 .../util/automaton/Lev2TParametricDescription.java |  296 +-
 .../lucene/util/automaton/LevenshteinAutomata.java |  252 +-
 .../automaton/LimitedFiniteStringsIterator.java    |   26 +-
 .../util/automaton/MinimizationOperations.java     |  111 +-
 .../apache/lucene/util/automaton/Operations.java   |  507 +--
 .../org/apache/lucene/util/automaton/RegExp.java   |  629 ++--
 .../apache/lucene/util/automaton/RunAutomaton.java |   84 +-
 .../apache/lucene/util/automaton/SortedIntSet.java |   56 +-
 .../apache/lucene/util/automaton/StatePair.java    |   28 +-
 .../TooComplexToDeterminizeException.java          |   33 +-
 .../apache/lucene/util/automaton/Transition.java   |   20 +-
 .../apache/lucene/util/automaton/UTF32ToUTF8.java  |  132 +-
 .../apache/lucene/util/automaton/package-info.java |   28 +-
 .../java/org/apache/lucene/util/bkd/BKDConfig.java |   36 +-
 .../apache/lucene/util/bkd/BKDRadixSelector.java   |  331 +-
 .../java/org/apache/lucene/util/bkd/BKDReader.java |  465 ++-
 .../java/org/apache/lucene/util/bkd/BKDWriter.java | 1461 +++++---
 .../org/apache/lucene/util/bkd/DocIdsWriter.java   |   31 +-
 .../apache/lucene/util/bkd/HeapPointReader.java    |   23 +-
 .../apache/lucene/util/bkd/HeapPointWriter.java    |   53 +-
 .../lucene/util/bkd/MutablePointsReaderUtils.java  |  108 +-
 .../apache/lucene/util/bkd/OfflinePointReader.java |   59 +-
 .../apache/lucene/util/bkd/OfflinePointWriter.java |   43 +-
 .../org/apache/lucene/util/bkd/PointReader.java    |   11 +-
 .../org/apache/lucene/util/bkd/PointValue.java     |    9 +-
 .../org/apache/lucene/util/bkd/PointWriter.java    |   13 +-
 .../org/apache/lucene/util/bkd/package-info.java   |    5 +-
 .../java/org/apache/lucene/util/compress/LZ4.java  |  112 +-
 .../util/compress/LowercaseAsciiCompression.java   |   36 +-
 .../apache/lucene/util/compress/package-info.java  |    6 +-
 .../org/apache/lucene/util/fst/BitTableUtil.java   |   76 +-
 .../lucene/util/fst/ByteSequenceOutputs.java       |   22 +-
 .../apache/lucene/util/fst/BytesRefFSTEnum.java    |   37 +-
 .../org/apache/lucene/util/fst/BytesStore.java     |  112 +-
 .../lucene/util/fst/CharSequenceOutputs.java       |   32 +-
 .../src/java/org/apache/lucene/util/fst/FST.java   |  502 +--
 .../org/apache/lucene/util/fst/FSTCompiler.java    |  330 +-
 .../java/org/apache/lucene/util/fst/FSTEnum.java   |  214 +-
 .../java/org/apache/lucene/util/fst/FSTStore.java  |   14 +-
 .../apache/lucene/util/fst/ForwardBytesReader.java |    1 -
 .../apache/lucene/util/fst/IntSequenceOutputs.java |   34 +-
 .../org/apache/lucene/util/fst/IntsRefFSTEnum.java |   39 +-
 .../java/org/apache/lucene/util/fst/NoOutputs.java |   43 +-
 .../java/org/apache/lucene/util/fst/NodeHash.java  |   64 +-
 .../apache/lucene/util/fst/OffHeapFSTStore.java    |   77 +-
 .../org/apache/lucene/util/fst/OnHeapFSTStore.java |  115 +-
 .../java/org/apache/lucene/util/fst/Outputs.java   |   55 +-
 .../org/apache/lucene/util/fst/PairOutputs.java    |   64 +-
 .../apache/lucene/util/fst/PositiveIntOutputs.java |   16 +-
 .../apache/lucene/util/fst/ReverseBytesReader.java |    6 +-
 .../lucene/util/fst/ReverseRandomAccessReader.java |   76 +-
 .../src/java/org/apache/lucene/util/fst/Util.java  |  430 ++-
 .../org/apache/lucene/util/fst/package-info.java   |   41 +-
 .../util/graph/GraphTokenStreamFiniteStrings.java  |   63 +-
 .../org/apache/lucene/util/graph/package-info.java |    4 +-
 .../org/apache/lucene/util/hnsw/BoundsChecker.java |   84 +-
 .../org/apache/lucene/util/hnsw/HnswGraph.java     |   60 +-
 .../apache/lucene/util/hnsw/HnswGraphBuilder.java  |   97 +-
 .../org/apache/lucene/util/hnsw/NeighborArray.java |   11 +-
 .../org/apache/lucene/util/hnsw/NeighborQueue.java |   35 +-
 .../org/apache/lucene/util/hnsw/package-info.java  |    4 +-
 .../apache/lucene/util/mutable/MutableValue.java   |   10 +-
 .../lucene/util/mutable/MutableValueBool.java      |   12 +-
 .../lucene/util/mutable/MutableValueDate.java      |    3 +-
 .../lucene/util/mutable/MutableValueDouble.java    |   13 +-
 .../lucene/util/mutable/MutableValueFloat.java     |   11 +-
 .../lucene/util/mutable/MutableValueInt.java       |   20 +-
 .../lucene/util/mutable/MutableValueLong.java      |   18 +-
 .../lucene/util/mutable/MutableValueStr.java       |   12 +-
 .../apache/lucene/util/mutable/package-info.java   |    6 +-
 .../java/org/apache/lucene/util/package-info.java  |    4 +-
 .../util/packed/AbstractBlockPackedWriter.java     |   19 +-
 .../lucene/util/packed/AbstractPagedMutable.java   |   14 +-
 .../lucene/util/packed/BlockPackedReader.java      |   29 +-
 .../util/packed/BlockPackedReaderIterator.java     |   31 +-
 .../lucene/util/packed/BlockPackedWriter.java      |   56 +-
 .../apache/lucene/util/packed/BulkOperation.java   |  243 +-
 .../lucene/util/packed/BulkOperationPacked.java    |   51 +-
 .../lucene/util/packed/BulkOperationPacked1.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked10.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked11.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked12.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked13.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked14.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked15.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked16.java  |   23 +-
 .../lucene/util/packed/BulkOperationPacked17.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked18.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked19.java  |   23 +-
 .../lucene/util/packed/BulkOperationPacked2.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked20.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked21.java  |   29 +-
 .../lucene/util/packed/BulkOperationPacked22.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked23.java  |   35 +-
 .../lucene/util/packed/BulkOperationPacked24.java  |   17 +-
 .../lucene/util/packed/BulkOperationPacked3.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked4.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked5.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked6.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked7.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked8.java   |   17 +-
 .../lucene/util/packed/BulkOperationPacked9.java   |   17 +-
 .../packed/BulkOperationPackedSingleBlock.java     |   43 +-
 .../lucene/util/packed/DeltaPackedLongValues.java  |   18 +-
 .../lucene/util/packed/DirectMonotonicReader.java  |   68 +-
 .../lucene/util/packed/DirectMonotonicWriter.java  |   58 +-
 .../packed/DirectPacked64SingleBlockReader.java    |    5 +-
 .../lucene/util/packed/DirectPackedReader.java     |   21 +-
 .../apache/lucene/util/packed/DirectReader.java    |  157 +-
 .../apache/lucene/util/packed/DirectWriter.java    |   69 +-
 .../apache/lucene/util/packed/GrowableWriter.java  |   31 +-
 .../util/packed/MonotonicBlockPackedReader.java    |   40 +-
 .../util/packed/MonotonicBlockPackedWriter.java    |   47 +-
 .../lucene/util/packed/MonotonicLongValues.java    |   32 +-
 .../org/apache/lucene/util/packed/Packed64.java    |  112 +-
 .../lucene/util/packed/Packed64SingleBlock.java    |   56 +-
 .../apache/lucene/util/packed/PackedDataInput.java |   21 +-
 .../lucene/util/packed/PackedDataOutput.java       |   24 +-
 .../org/apache/lucene/util/packed/PackedInts.java  |  661 ++--
 .../lucene/util/packed/PackedLongValues.java       |   57 +-
 .../lucene/util/packed/PackedReaderIterator.java   |   13 +-
 .../apache/lucene/util/packed/PackedWriter.java    |    7 +-
 .../lucene/util/packed/PagedGrowableWriter.java    |   26 +-
 .../apache/lucene/util/packed/PagedMutable.java    |   14 +-
 .../apache/lucene/util/packed/gen_BulkOperation.py |    2 +
 .../lucene/util/packed/gen_Packed64SingleBlock.py  |    3 +
 .../apache/lucene/util/packed/package-info.java    |  143 +-
 .../src/test/org/apache/lucene/TestAssertions.java |   17 +-
 .../core/src/test/org/apache/lucene/TestDemo.java  |   10 +-
 .../test/org/apache/lucene/TestExternalCodecs.java |   50 +-
 .../apache/lucene/TestMergeSchedulerExternal.java  |   29 +-
 .../src/test/org/apache/lucene/TestSearch.java     |  141 +-
 .../org/apache/lucene/TestSearchForDuplicates.java |  163 +-
 .../lucene/analysis/FakeCharFilterFactory.java     |    4 +-
 .../lucene/analysis/FakeTokenFilterFactory.java    |    4 +-
 .../analysis/TestAbstractAnalysisFactory.java      |    2 +-
 .../lucene/analysis/TestAnalysisSPILoader.java     |  188 +-
 .../lucene/analysis/TestAnalyzerWrapper.java       |   49 +-
 .../lucene/analysis/TestCachingTokenFilter.java    |   93 +-
 .../apache/lucene/analysis/TestCharArrayMap.java   |  112 +-
 .../apache/lucene/analysis/TestCharArraySet.java   |  239 +-
 .../org/apache/lucene/analysis/TestCharFilter.java |    8 +-
 .../apache/lucene/analysis/TestCharacterUtils.java |   40 +-
 .../analysis/TestDelegatingAnalyzerWrapper.java    |   85 +-
 .../lucene/analysis/TestGraphTokenFilter.java      |   60 +-
 .../lucene/analysis/TestGraphTokenizers.java       |  415 +--
 .../lucene/analysis/TestReusableStringReader.java  |    8 +-
 .../org/apache/lucene/analysis/TestStopFilter.java |  104 +-
 .../test/org/apache/lucene/analysis/TestToken.java |  114 +-
 .../apache/lucene/analysis/TestWordlistLoader.java |   39 +-
 .../lucene/codecs/TestCodecLoadingDeadlock.java    |  167 +-
 .../org/apache/lucene/codecs/TestCodecUtil.java    |  275 +-
 .../codecs/TestCompetitiveFreqNormAccumulator.java |   11 +-
 .../compressing/AbstractTestCompressionMode.java   |   40 +-
 .../compressing/TestFastCompressionMode.java       |    1 -
 .../compressing/TestFastDecompressionMode.java     |    1 -
 .../compressing/TestHighCompressionMode.java       |    2 -
 .../lucene50/TestLucene50CompoundFormat.java       |    3 +-
 .../lucene50/TestLucene50LiveDocsFormat.java       |    1 -
 .../lucene50/TestLucene50TermVectorsFormat.java    |    1 -
 .../lucene50/TestLucene60FieldInfoFormat.java      |    5 +-
 .../BaseLucene80DocValuesFormatTestCase.java       |  176 +-
 ...TestBestCompressionLucene80DocValuesFormat.java |   12 +-
 .../TestBestSpeedLucene80DocValuesFormat.java      |    9 +-
 .../lucene/codecs/lucene80/TestIndexedDISI.java    |  236 +-
 .../codecs/lucene80/TestLucene80NormsFormat.java   |    7 +-
 .../TestLucene80NormsFormatMergeInstance.java      |    5 +-
 .../lucene/codecs/lucene84/TestForDeltaUtil.java   |   16 +-
 .../apache/lucene/codecs/lucene84/TestForUtil.java |   15 +-
 .../lucene84/TestLucene84PostingsFormat.java       |   23 +-
 .../lucene/codecs/lucene84/TestPForUtil.java       |   15 +-
 .../codecs/lucene86/TestLucene86PointsFormat.java  |  240 +-
 .../lucene86/TestLucene86SegmentInfoFormat.java    |    3 +-
 ...tLucene87StoredFieldsFormatHighCompression.java |   31 +-
 ...estLucene87StoredFieldsFormatMergeInstance.java |    5 +-
 .../perfield/TestPerFieldDocValuesFormat.java      |  104 +-
 .../perfield/TestPerFieldPostingsFormat.java       |    6 +-
 .../perfield/TestPerFieldPostingsFormat2.java      |  154 +-
 .../lucene/document/BaseLatLonShapeTestCase.java   |  144 +-
 .../lucene/document/BaseShapeEncodingTestCase.java |  169 +-
 .../apache/lucene/document/BaseShapeTestCase.java  |  275 +-
 .../lucene/document/BaseXYShapeTestCase.java       |   47 +-
 .../apache/lucene/document/TestBinaryDocument.java |   37 +-
 .../org/apache/lucene/document/TestDateTools.java  |  104 +-
 .../org/apache/lucene/document/TestDocument.java   |  164 +-
 .../apache/lucene/document/TestDoubleRange.java    |    6 +-
 .../lucene/document/TestFeatureDoubleValues.java   |  109 +-
 .../apache/lucene/document/TestFeatureField.java   |   77 +-
 .../apache/lucene/document/TestFeatureSort.java    |   48 +-
 .../test/org/apache/lucene/document/TestField.java |  251 +-
 .../org/apache/lucene/document/TestFieldType.java  |   40 +-
 .../org/apache/lucene/document/TestFloatRange.java |    8 +-
 .../lucene/document/TestInetAddressPoint.java      |  177 +-
 .../org/apache/lucene/document/TestIntRange.java   |    2 +-
 .../lucene/document/TestLatLonDocValuesField.java  |   10 +-
 .../document/TestLatLonLineShapeQueries.java       |   13 +-
 .../document/TestLatLonMultiLineShapeQueries.java  |   11 +-
 .../document/TestLatLonMultiPointShapeQueries.java |   11 +-
 .../TestLatLonMultiPolygonShapeQueries.java        |   16 +-
 .../apache/lucene/document/TestLatLonPoint.java    |   21 +-
 .../TestLatLonPointDistanceFeatureQuery.java       |  226 +-
 .../document/TestLatLonPointDistanceSort.java      |   87 +-
 .../document/TestLatLonPointShapeQueries.java      |   21 +-
 .../document/TestLatLonPolygonShapeQueries.java    |    5 +-
 .../apache/lucene/document/TestLatLonShape.java    |  469 +--
 .../lucene/document/TestLatLonShapeEncoding.java   |    2 +-
 .../document/TestLongDistanceFeatureQuery.java     |  117 +-
 .../org/apache/lucene/document/TestLongRange.java  |    2 +-
 .../lucene/document/TestXYLineShapeQueries.java    |   19 +-
 .../document/TestXYMultiLineShapeQueries.java      |   14 +-
 .../document/TestXYMultiPointShapeQueries.java     |    9 +-
 .../document/TestXYMultiPolygonShapeQueries.java   |   15 +-
 .../lucene/document/TestXYPointShapeQueries.java   |   23 +-
 .../lucene/document/TestXYPolygonShapeQueries.java |   13 +-
 .../org/apache/lucene/document/TestXYShape.java    |   54 +-
 .../lucene/document/TestXYShapeEncoding.java       |    2 +-
 .../src/test/org/apache/lucene/geo/TestCircle.java |   48 +-
 .../test/org/apache/lucene/geo/TestCircle2D.java   |   46 +-
 .../apache/lucene/geo/TestGeoEncodingUtils.java    |   75 +-
 .../test/org/apache/lucene/geo/TestGeoUtils.java   |  111 +-
 .../src/test/org/apache/lucene/geo/TestLine2D.java |   36 +-
 .../src/test/org/apache/lucene/geo/TestPoint.java  |   26 +-
 .../test/org/apache/lucene/geo/TestPoint2D.java    |   74 +-
 .../test/org/apache/lucene/geo/TestPolygon.java    |  142 +-
 .../test/org/apache/lucene/geo/TestPolygon2D.java  |  236 +-
 .../org/apache/lucene/geo/TestRectangle2D.java     |   51 +-
 .../lucene/geo/TestSimpleWKTShapeParsing.java      |   83 +-
 .../org/apache/lucene/geo/TestTessellator.java     |  857 +++--
 .../test/org/apache/lucene/geo/TestXYCircle.java   |   86 +-
 .../src/test/org/apache/lucene/geo/TestXYLine.java |   77 +-
 .../test/org/apache/lucene/geo/TestXYPoint.java    |   62 +-
 .../test/org/apache/lucene/geo/TestXYPolygon.java  |   88 +-
 .../org/apache/lucene/geo/TestXYRectangle.java     |  125 +-
 .../apache/lucene/index/Test2BBinaryDocValues.java |   87 +-
 .../test/org/apache/lucene/index/Test2BDocs.java   |   59 +-
 .../lucene/index/Test2BNumericDocValues.java       |   42 +-
 .../test/org/apache/lucene/index/Test2BPoints.java |   69 +-
 .../org/apache/lucene/index/Test2BPositions.java   |   41 +-
 .../org/apache/lucene/index/Test2BPostings.java    |   35 +-
 .../apache/lucene/index/Test2BPostingsBytes.java   |   72 +-
 .../index/Test2BSortedDocValuesFixedSorted.java    |   48 +-
 .../lucene/index/Test2BSortedDocValuesOrds.java    |   50 +-
 .../test/org/apache/lucene/index/Test2BTerms.java  |   74 +-
 .../apache/lucene/index/Test4GBStoredFields.java   |   30 +-
 .../org/apache/lucene/index/TestAddIndexes.java    |  670 ++--
 .../lucene/index/TestAllFilesCheckIndexHeader.java |   58 +-
 .../lucene/index/TestAllFilesDetectBitFlips.java   |   44 +-
 .../lucene/index/TestAllFilesDetectTruncation.java |   52 +-
 .../index/TestAllFilesHaveChecksumFooter.java      |   21 +-
 .../lucene/index/TestAllFilesHaveCodecHeader.java  |   35 +-
 .../org/apache/lucene/index/TestAtomicUpdate.java  |   24 +-
 .../apache/lucene/index/TestBagOfPositions.java    |   69 +-
 .../org/apache/lucene/index/TestBagOfPostings.java |   82 +-
 .../lucene/index/TestBinaryDocValuesUpdates.java   |  587 +--
 .../org/apache/lucene/index/TestBinaryTerms.java   |   22 +-
 .../apache/lucene/index/TestBufferedUpdates.java   |    4 +-
 .../org/apache/lucene/index/TestByteSlices.java    |   31 +-
 .../org/apache/lucene/index/TestCheckIndex.java    |    8 +-
 .../lucene/index/TestCodecHoldsOpenFiles.java      |    5 +-
 .../test/org/apache/lucene/index/TestCodecs.java   |  329 +-
 .../lucene/index/TestConcurrentMergeScheduler.java |  442 +--
 .../lucene/index/TestConsistentFieldNumbers.java   |  145 +-
 .../test/org/apache/lucene/index/TestCrash.java    |   25 +-
 .../lucene/index/TestCrashCausesCorruptIndex.java  |   86 +-
 .../org/apache/lucene/index/TestCustomNorms.java   |   15 +-
 .../apache/lucene/index/TestCustomTermFreq.java    |  265 +-
 .../apache/lucene/index/TestDeletionPolicy.java    |  301 +-
 .../lucene/index/TestDemoParallelLeafReader.java   |  650 ++--
 .../apache/lucene/index/TestDirectoryReader.java   |  527 +--
 .../lucene/index/TestDirectoryReaderReopen.java    |  702 ++--
 .../src/test/org/apache/lucene/index/TestDoc.java  |  127 +-
 .../test/org/apache/lucene/index/TestDocCount.java |   16 +-
 .../org/apache/lucene/index/TestDocIDMerger.java   |   59 +-
 .../index/TestDocInverterPerFieldErrorInfo.java    |   36 +-
 .../org/apache/lucene/index/TestDocValues.java     |  259 +-
 .../lucene/index/TestDocValuesFieldUpdates.java    |   16 +-
 .../apache/lucene/index/TestDocValuesIndexing.java |  300 +-
 .../apache/lucene/index/TestDocsAndPositions.java  |  148 +-
 .../apache/lucene/index/TestDocsWithFieldSet.java  |    2 -
 .../apache/lucene/index/TestDocumentWriter.java    |  190 +-
 .../index/TestDocumentsWriterDeleteQueue.java      |   55 +-
 .../index/TestDocumentsWriterPerThreadPool.java    |   53 +-
 .../index/TestDocumentsWriterStallControl.java     |  188 +-
 .../org/apache/lucene/index/TestDuelingCodecs.java |   46 +-
 .../lucene/index/TestDuelingCodecsAtNight.java     |    7 +-
 .../lucene/index/TestExceedMaxTermLength.java      |   70 +-
 .../lucene/index/TestExitableDirectoryReader.java  |  153 +-
 .../org/apache/lucene/index/TestFieldInfos.java    |   92 +-
 .../apache/lucene/index/TestFieldInvertState.java  |   40 +-
 .../org/apache/lucene/index/TestFieldReuse.java    |   42 +-
 .../lucene/index/TestFieldUpdatesBuffer.java       |   70 +-
 .../org/apache/lucene/index/TestFieldsReader.java  |   33 +-
 .../apache/lucene/index/TestFilterCodecReader.java |   21 +-
 .../lucene/index/TestFilterDirectoryReader.java    |   13 +-
 .../apache/lucene/index/TestFilterLeafReader.java  |   63 +-
 .../apache/lucene/index/TestFilterMergePolicy.java |    3 +-
 .../src/test/org/apache/lucene/index/TestFlex.java |   26 +-
 .../lucene/index/TestFlushByRamOrCountsPolicy.java |   89 +-
 .../apache/lucene/index/TestForTooMuchCloning.java |   42 +-
 .../apache/lucene/index/TestForceMergeForever.java |   35 +-
 .../lucene/index/TestFrozenBufferedUpdates.java    |   13 +-
 .../org/apache/lucene/index/TestIndexCommit.java   |  106 +-
 .../apache/lucene/index/TestIndexFileDeleter.java  |  282 +-
 .../org/apache/lucene/index/TestIndexInput.java    |  265 +-
 .../lucene/index/TestIndexManyDocuments.java       |   38 +-
 .../org/apache/lucene/index/TestIndexOptions.java  |   53 +-
 .../apache/lucene/index/TestIndexReaderClose.java  |   92 +-
 .../org/apache/lucene/index/TestIndexSorting.java  |  303 +-
 .../apache/lucene/index/TestIndexTooManyDocs.java  |  105 +-
 .../org/apache/lucene/index/TestIndexWriter.java   | 2219 +++++++-----
 .../apache/lucene/index/TestIndexWriterCommit.java |  412 ++-
 .../apache/lucene/index/TestIndexWriterConfig.java |  145 +-
 .../apache/lucene/index/TestIndexWriterDelete.java |  580 +--
 .../lucene/index/TestIndexWriterDeleteByQuery.java |    1 -
 .../lucene/index/TestIndexWriterExceptions.java    | 1432 ++++----
 .../lucene/index/TestIndexWriterExceptions2.java   |  103 +-
 .../lucene/index/TestIndexWriterForceMerge.java    |  152 +-
 .../lucene/index/TestIndexWriterFromReader.java    |  271 +-
 .../lucene/index/TestIndexWriterLockRelease.java   |   15 +-
 .../lucene/index/TestIndexWriterMaxDocs.java       |  306 +-
 .../lucene/index/TestIndexWriterMergePolicy.java   |  539 +--
 .../lucene/index/TestIndexWriterMerging.java       |  266 +-
 .../lucene/index/TestIndexWriterNRTIsCurrent.java  |   36 +-
 .../lucene/index/TestIndexWriterOnDiskFull.java    |  303 +-
 .../lucene/index/TestIndexWriterOnJRECrash.java    |  153 +-
 .../lucene/index/TestIndexWriterOnVMError.java     |  175 +-
 .../index/TestIndexWriterOutOfFileDescriptors.java |   22 +-
 .../apache/lucene/index/TestIndexWriterReader.java |  587 +--
 .../index/TestIndexWriterThreadsToSegments.java    |  224 +-
 .../lucene/index/TestIndexWriterUnicode.java       |  158 +-
 .../lucene/index/TestIndexWriterWithThreads.java   |  401 ++-
 .../apache/lucene/index/TestIndexableField.java    |  236 +-
 .../lucene/index/TestIndexingSequenceNumbers.java  |  463 ++-
 .../org/apache/lucene/index/TestInfoStream.java    |   75 +-
 .../org/apache/lucene/index/TestIntBlockPool.java  |   50 +-
 .../org/apache/lucene/index/TestIsCurrent.java     |   19 +-
 .../test/org/apache/lucene/index/TestKnnGraph.java |  122 +-
 .../apache/lucene/index/TestLazyProxSkipping.java  |  351 +-
 .../apache/lucene/index/TestLogMergePolicy.java    |    1 -
 .../org/apache/lucene/index/TestLongPostings.java  |   94 +-
 .../org/apache/lucene/index/TestManyFields.java    |   74 +-
 .../org/apache/lucene/index/TestMaxPosition.java   |   19 +-
 .../apache/lucene/index/TestMaxTermFrequency.java  |   30 +-
 .../org/apache/lucene/index/TestMergePolicy.java   |  107 +-
 .../apache/lucene/index/TestMergeRateLimiter.java  |    1 -
 .../org/apache/lucene/index/TestMixedCodecs.java   |    6 +-
 .../lucene/index/TestMixedDocValuesUpdates.java    |  385 +-
 .../apache/lucene/index/TestMultiDocValues.java    |  132 +-
 .../org/apache/lucene/index/TestMultiFields.java   |   55 +-
 .../lucene/index/TestMultiLevelSkipList.java       |   57 +-
 .../apache/lucene/index/TestMultiTermsEnum.java    |   19 +-
 .../apache/lucene/index/TestNRTReaderCleanup.java  |    6 +-
 .../lucene/index/TestNRTReaderWithThreads.java     |   29 +-
 .../org/apache/lucene/index/TestNRTThreads.java    |   23 +-
 .../org/apache/lucene/index/TestNeverDelete.java   |   55 +-
 .../org/apache/lucene/index/TestNewestSegment.java |    4 +-
 .../apache/lucene/index/TestNoDeletionPolicy.java  |   15 +-
 .../org/apache/lucene/index/TestNoMergePolicy.java |    7 +-
 .../apache/lucene/index/TestNoMergeScheduler.java  |   10 +-
 .../test/org/apache/lucene/index/TestNorms.java    |   22 +-
 .../lucene/index/TestNumericDocValuesUpdates.java  |  678 ++--
 .../org/apache/lucene/index/TestOmitNorms.java     |  114 +-
 .../org/apache/lucene/index/TestOmitPositions.java |   54 +-
 .../test/org/apache/lucene/index/TestOmitTf.java   |  400 ++-
 .../index/TestOneMergeWrappingMergePolicy.java     |   77 +-
 .../org/apache/lucene/index/TestOrdinalMap.java    |   61 +-
 .../lucene/index/TestParallelCompositeReader.java  |  234 +-
 .../lucene/index/TestParallelLeafReader.java       |  144 +-
 .../lucene/index/TestParallelReaderEmptyIndex.java |   52 +-
 .../apache/lucene/index/TestParallelTermEnum.java  |    8 +-
 .../test/org/apache/lucene/index/TestPayloads.java |  233 +-
 .../apache/lucene/index/TestPayloadsOnVectors.java |   29 +-
 .../apache/lucene/index/TestPendingDeletes.java    |   58 +-
 .../lucene/index/TestPendingSoftDeletes.java       |  220 +-
 .../apache/lucene/index/TestPerSegmentDeletes.java |   67 +-
 .../TestPersistentSnapshotDeletionPolicy.java      |   76 +-
 .../org/apache/lucene/index/TestPointValues.java   |  355 +-
 .../apache/lucene/index/TestPostingsOffsets.java   |  257 +-
 .../apache/lucene/index/TestPrefixCodedTerms.java  |   21 +-
 .../org/apache/lucene/index/TestReadOnlyIndex.java |   31 +-
 .../org/apache/lucene/index/TestReaderClosed.java  |   22 +-
 .../org/apache/lucene/index/TestReaderPool.java    |  131 +-
 .../lucene/index/TestReaderWrapperDVTypeCheck.java |   63 +-
 .../test/org/apache/lucene/index/TestRollback.java |   10 +-
 .../apache/lucene/index/TestRollingUpdates.java    |   53 +-
 .../lucene/index/TestSameTokenSamePosition.java    |   23 +-
 .../org/apache/lucene/index/TestSegmentInfos.java  |  254 +-
 .../org/apache/lucene/index/TestSegmentMerger.java |   73 +-
 .../org/apache/lucene/index/TestSegmentReader.java |  140 +-
 .../apache/lucene/index/TestSegmentTermDocs.java   |  146 +-
 .../apache/lucene/index/TestSegmentTermEnum.java   |   38 +-
 .../lucene/index/TestSegmentToThreadMapping.java   |   29 +-
 .../lucene/index/TestSizeBoundedForceMerge.java    |  129 +-
 .../lucene/index/TestSnapshotDeletionPolicy.java   |  200 +-
 .../TestSoftDeletesDirectoryReaderWrapper.java     |   72 +-
 .../index/TestSoftDeletesRetentionMergePolicy.java |  376 +-
 .../lucene/index/TestSortedSetDocValues.java       |    1 -
 .../lucene/index/TestSortingCodecReader.java       |   73 +-
 .../org/apache/lucene/index/TestStressAdvance.java |   20 +-
 .../org/apache/lucene/index/TestStressDeletes.java |   92 +-
 .../apache/lucene/index/TestStressIndexing.java    |   55 +-
 .../apache/lucene/index/TestStressIndexing2.java   |  421 ++-
 .../org/apache/lucene/index/TestStressNRT.java     |  574 +--
 .../org/apache/lucene/index/TestSumDocFreq.java    |   23 +-
 .../apache/lucene/index/TestSwappedIndexFiles.java |   37 +-
 .../src/test/org/apache/lucene/index/TestTerm.java |    1 -
 .../org/apache/lucene/index/TestTermStates.java    |    3 +-
 .../org/apache/lucene/index/TestTermVectors.java   |   50 +-
 .../apache/lucene/index/TestTermVectorsReader.java |  226 +-
 .../apache/lucene/index/TestTermVectorsWriter.java |  156 +-
 .../org/apache/lucene/index/TestTermdocPerf.java   |  101 +-
 .../test/org/apache/lucene/index/TestTerms.java    |   17 +-
 .../org/apache/lucene/index/TestTermsEnum.java     |  190 +-
 .../org/apache/lucene/index/TestTermsEnum2.java    |   62 +-
 .../apache/lucene/index/TestTermsHashPerField.java |  208 +-
 .../lucene/index/TestThreadedForceMerge.java       |  130 +-
 .../apache/lucene/index/TestTieredMergePolicy.java |  427 ++-
 .../index/TestTragicIndexWriterDeadlock.java       |  140 +-
 .../lucene/index/TestTransactionRollback.java      |  103 +-
 .../org/apache/lucene/index/TestTransactions.java  |   77 +-
 .../org/apache/lucene/index/TestTryDelete.java     |   45 +-
 .../lucene/index/TestTwoPhaseCommitTool.java       |    7 +-
 .../apache/lucene/index/TestUniqueTermCount.java   |   27 +-
 .../lucene/index/TestUpgradeIndexMergePolicy.java  |    5 +-
 .../org/apache/lucene/index/TestVectorValues.java  |  210 +-
 .../apache/lucene/search/BaseTestRangeFilter.java  |   70 +-
 .../lucene/search/FuzzyTermOnShortTermsTest.java   |  119 +-
 .../apache/lucene/search/JustCompileSearch.java    |   33 +-
 .../apache/lucene/search/TermInSetQueryTest.java   |   87 +-
 .../search/TestApproximationSearchEquivalence.java |   12 +-
 .../apache/lucene/search/TestAutomatonQuery.java   |  131 +-
 .../lucene/search/TestAutomatonQueryUnicode.java   |   19 +-
 .../apache/lucene/search/TestBlendedTermQuery.java |   68 +-
 .../lucene/search/TestBlockMaxConjunction.java     |   29 +-
 .../org/apache/lucene/search/TestBoolean2.java     |  125 +-
 .../lucene/search/TestBoolean2ScorerSupplier.java  |  144 +-
 .../lucene/search/TestBooleanMinShouldMatch.java   |  678 ++--
 .../org/apache/lucene/search/TestBooleanOr.java    |  107 +-
 .../org/apache/lucene/search/TestBooleanQuery.java |  327 +-
 .../search/TestBooleanQueryVisitSubscorers.java    |  100 +-
 .../apache/lucene/search/TestBooleanRewrites.java  |  495 +--
 .../apache/lucene/search/TestBooleanScorer.java    |  172 +-
 .../org/apache/lucene/search/TestBoostQuery.java   |   34 +-
 .../apache/lucene/search/TestCachingCollector.java |   83 +-
 .../lucene/search/TestComplexExplanations.java     |  238 +-
 .../TestComplexExplanationsOfNonMatches.java       |   11 +-
 .../apache/lucene/search/TestConjunctionDISI.java  |  139 +-
 .../org/apache/lucene/search/TestConjunctions.java |   74 +-
 .../lucene/search/TestConstantScoreQuery.java      |  141 +-
 .../lucene/search/TestConstantScoreScorer.java     |   69 +-
 .../search/TestControlledRealTimeReopenThread.java |  263 +-
 .../lucene/search/TestCustomSearcherSort.java      |  105 +-
 .../org/apache/lucene/search/TestDateSort.java     |   11 +-
 .../lucene/search/TestDisjunctionMaxQuery.java     |  436 ++-
 ...estDisjunctionScoreBlockBoundaryPropagator.java |    5 +-
 .../apache/lucene/search/TestDocIdSetIterator.java |   22 +-
 .../apache/lucene/search/TestDocValuesQueries.java |  117 +-
 .../lucene/search/TestDocValuesRewriteMethod.java  |   48 +-
 .../lucene/search/TestDoubleRangeFieldQueries.java |   86 +-
 .../lucene/search/TestDoubleValuesSource.java      |  195 +-
 .../apache/lucene/search/TestEarlyTermination.java |   56 +-
 .../lucene/search/TestElevationComparator.java     |  221 +-
 .../lucene/search/TestFieldCacheRewriteMethod.java |   18 +-
 .../search/TestFieldSortOptimizationSkipping.java  |  137 +-
 .../apache/lucene/search/TestFieldValueQuery.java  |   21 +-
 .../org/apache/lucene/search/TestFilterWeight.java |   30 +-
 .../lucene/search/TestFloatRangeFieldQueries.java  |   89 +-
 .../org/apache/lucene/search/TestFuzzyQuery.java   |  340 +-
 .../lucene/search/TestIndexOrDocValuesQuery.java   |   89 +-
 .../apache/lucene/search/TestIndexSearcher.java    |  228 +-
 .../lucene/search/TestInetAddressRangeQueries.java |   43 +-
 .../lucene/search/TestIntRangeFieldQueries.java    |   59 +-
 .../apache/lucene/search/TestLRUQueryCache.java    |  781 ++--
 .../lucene/search/TestLatLonDocValuesQueries.java  |    6 +-
 .../lucene/search/TestLatLonPointQueries.java      |    7 +-
 .../apache/lucene/search/TestLiveFieldValues.java  |  167 +-
 .../lucene/search/TestLongRangeFieldQueries.java   |   63 +-
 .../apache/lucene/search/TestLongValuesSource.java |   28 +-
 .../lucene/search/TestMatchAllDocsQuery.java       |   36 +-
 .../apache/lucene/search/TestMatchNoDocsQuery.java |   16 +-
 .../apache/lucene/search/TestMatchesIterator.java  |  709 ++--
 .../apache/lucene/search/TestMaxClauseLimit.java   |   72 +-
 .../lucene/search/TestMaxScoreAccumulator.java     |    3 +-
 .../lucene/search/TestMaxScoreSumPropagator.java   |   31 +-
 .../apache/lucene/search/TestMinShouldMatch2.java  |  177 +-
 .../apache/lucene/search/TestMultiCollector.java   |  218 +-
 .../apache/lucene/search/TestMultiPhraseEnum.java  |   56 +-
 .../apache/lucene/search/TestMultiPhraseQuery.java |  288 +-
 .../apache/lucene/search/TestMultiSliceMerge.java  |   25 +-
 .../lucene/search/TestMultiTermConstantScore.java  |  168 +-
 .../lucene/search/TestMultiTermQueryRewrites.java  |  166 +-
 .../lucene/search/TestMultiThreadTermVectors.java  |   25 +-
 .../org/apache/lucene/search/TestMultiset.java     |    3 -
 .../apache/lucene/search/TestNGramPhraseQuery.java |   27 +-
 .../org/apache/lucene/search/TestNeedsScores.java  |   36 +-
 .../lucene/search/TestNormsFieldExistsQuery.java   |   15 +-
 .../src/test/org/apache/lucene/search/TestNot.java |   12 +-
 .../lucene/search/TestPhrasePrefixQuery.java       |   26 +-
 .../org/apache/lucene/search/TestPhraseQuery.java  |  557 ++-
 .../org/apache/lucene/search/TestPointQueries.java |  980 +++--
 .../lucene/search/TestPositionIncrement.java       |  133 +-
 .../search/TestPositiveScoresOnlyCollector.java    |   53 +-
 .../lucene/search/TestPrefixInBooleanQuery.java    |   54 +-
 .../org/apache/lucene/search/TestPrefixQuery.java  |   21 +-
 .../org/apache/lucene/search/TestPrefixRandom.java |   48 +-
 .../apache/lucene/search/TestQueryRescorer.java    |  162 +-
 .../org/apache/lucene/search/TestQueryVisitor.java |  228 +-
 .../search/TestRangeFieldsDocValuesQuery.java      |    8 +-
 .../org/apache/lucene/search/TestRegexpQuery.java  |  118 +-
 .../org/apache/lucene/search/TestRegexpRandom.java |   46 +-
 .../apache/lucene/search/TestRegexpRandom2.java    |   51 +-
 .../lucene/search/TestReqExclBulkScorer.java       |   88 +-
 .../apache/lucene/search/TestReqOptSumScorer.java  |  162 +-
 .../lucene/search/TestSameScoresWithThreads.java   |   72 +-
 .../search/TestScoreCachingWrappingScorer.java     |   77 +-
 .../org/apache/lucene/search/TestScorerPerf.java   |  240 +-
 .../org/apache/lucene/search/TestSearchAfter.java  |   97 +-
 .../lucene/search/TestSearchWithThreads.java       |   50 +-
 .../apache/lucene/search/TestSearcherManager.java  |  554 +--
 .../lucene/search/TestSegmentCacheables.java       |    7 +-
 .../apache/lucene/search/TestShardSearching.java   |  101 +-
 .../org/apache/lucene/search/TestSimilarity.java   |  208 +-
 .../lucene/search/TestSimilarityProvider.java      |    8 +-
 .../lucene/search/TestSimpleExplanations.java      |  355 +-
 .../search/TestSimpleExplanationsOfNonMatches.java |   12 +-
 .../TestSimpleExplanationsWithFillerDocs.java      |   60 +-
 .../lucene/search/TestSimpleSearchEquivalence.java |   55 +-
 .../lucene/search/TestSloppyPhraseQuery.java       |  242 +-
 .../lucene/search/TestSloppyPhraseQuery2.java      |   26 +-
 .../test/org/apache/lucene/search/TestSort.java    |  124 +-
 .../org/apache/lucene/search/TestSortRandom.java   |  114 +-
 .../org/apache/lucene/search/TestSortRescorer.java |   91 +-
 .../lucene/search/TestSortedNumericSortField.java  |   53 +-
 .../lucene/search/TestSortedSetSelector.java       |   93 +-
 .../lucene/search/TestSortedSetSortField.java      |   37 +-
 .../apache/lucene/search/TestSubScorerFreqs.java   |   44 +-
 .../org/apache/lucene/search/TestSynonymQuery.java |  250 +-
 .../org/apache/lucene/search/TestTermQuery.java    |  107 +-
 .../apache/lucene/search/TestTermRangeQuery.java   |   84 +-
 .../org/apache/lucene/search/TestTermScorer.java   |  193 +-
 .../lucene/search/TestTimeLimitingCollector.java   |  267 +-
 .../apache/lucene/search/TestTopDocsCollector.java |  235 +-
 .../org/apache/lucene/search/TestTopDocsMerge.java |  117 +-
 .../lucene/search/TestTopFieldCollector.java       |  246 +-
 .../TestTopFieldCollectorEarlyTermination.java     |  157 +-
 .../lucene/search/TestTotalHitCountCollector.java  |    9 +-
 .../TestUsageTrackingFilterCachingPolicy.java      |   37 +-
 .../org/apache/lucene/search/TestWANDScorer.java   |  174 +-
 .../org/apache/lucene/search/TestWildcard.java     |  279 +-
 .../apache/lucene/search/TestWildcardRandom.java   |   49 +-
 .../lucene/search/TestXYPointDistanceSort.java     |   67 +-
 .../apache/lucene/search/TestXYPointQueries.java   |    2 +-
 .../search/similarities/AxiomaticTestCase.java     |    6 +-
 .../search/similarities/BasicModelTestCase.java    |   11 +-
 .../search/similarities/DistributionTestCase.java  |    5 +-
 .../search/similarities/TestAxiomaticF1EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF1LOG.java    |    1 -
 .../search/similarities/TestAxiomaticF2EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF2LOG.java    |    1 -
 .../search/similarities/TestAxiomaticF3EXP.java    |    1 -
 .../search/similarities/TestAxiomaticF3LOG.java    |    1 -
 .../similarities/TestAxiomaticSimilarity.java      |   81 +-
 .../search/similarities/TestBM25Similarity.java    |   80 +-
 .../search/similarities/TestBasicModelG.java       |    1 -
 .../search/similarities/TestBasicModelIF.java      |    1 -
 .../search/similarities/TestBasicModelIn.java      |    1 -
 .../search/similarities/TestBasicModelIne.java     |    1 -
 .../search/similarities/TestBooleanSimilarity.java |   26 +-
 .../search/similarities/TestClassicSimilarity.java |   77 +-
 .../search/similarities/TestDistributionLL.java    |    1 -
 .../search/similarities/TestDistributionSPL.java   |    1 -
 .../similarities/TestIndependenceChiSquared.java   |    1 -
 .../similarities/TestIndependenceSaturated.java    |    1 -
 .../similarities/TestIndependenceStandardized.java |    1 -
 .../similarities/TestLMDirichletSimilarity.java    |    1 -
 .../TestLMJelinekMercerSimilarity.java             |    1 -
 .../search/similarities/TestSimilarity2.java       |   62 +-
 .../search/similarities/TestSimilarityBase.java    |  397 +--
 .../search/spans/JustCompileSearchSpans.java       |   18 +-
 .../org/apache/lucene/search/spans/TestBasics.java |  377 +-
 .../search/spans/TestFieldMaskingSpanQuery.java    |  348 +-
 .../lucene/search/spans/TestFilterSpans.java       |    2 -
 .../lucene/search/spans/TestNearSpansOrdered.java  |  275 +-
 .../lucene/search/spans/TestSpanBoostQuery.java    |   14 +-
 .../lucene/search/spans/TestSpanCollection.java    |   51 +-
 .../lucene/search/spans/TestSpanContainQuery.java  |   34 +-
 .../lucene/search/spans/TestSpanExplanations.java  |  154 +-
 .../spans/TestSpanExplanationsOfNonMatches.java    |   13 +-
 .../lucene/search/spans/TestSpanFirstQuery.java    |   15 +-
 .../spans/TestSpanMultiTermQueryWrapper.java       |  133 +-
 .../lucene/search/spans/TestSpanNearQuery.java     |   54 +-
 .../lucene/search/spans/TestSpanNotQuery.java      |   32 +-
 .../lucene/search/spans/TestSpanOrQuery.java       |   18 +-
 .../search/spans/TestSpanSearchEquivalence.java    |  313 +-
 .../lucene/search/spans/TestSpanTermQuery.java     |   19 +-
 .../org/apache/lucene/search/spans/TestSpans.java  |  248 +-
 .../apache/lucene/search/spans/TestSpansEnum.java  |   56 +-
 .../lucene/store/BaseDataOutputTestCase.java       |  230 +-
 .../apache/lucene/store/TestBufferedChecksum.java  |    6 +-
 .../lucene/store/TestBufferedIndexInput.java       |  170 +-
 .../lucene/store/TestByteArrayDataInput.java       |    1 -
 .../lucene/store/TestByteBuffersDataInput.java     |  149 +-
 .../lucene/store/TestByteBuffersDataOutput.java    |   61 +-
 .../lucene/store/TestByteBuffersDirectory.java     |   74 +-
 .../org/apache/lucene/store/TestDirectory.java     |   56 +-
 .../lucene/store/TestFileSwitchDirectory.java      |   76 +-
 .../apache/lucene/store/TestFilterDirectory.java   |    8 +-
 .../org/apache/lucene/store/TestLockFactory.java   |  126 +-
 .../org/apache/lucene/store/TestMmapDirectory.java |   44 +-
 .../org/apache/lucene/store/TestMultiMMap.java     |  204 +-
 .../apache/lucene/store/TestNIOFSDirectory.java    |   29 +-
 .../lucene/store/TestNRTCachingDirectory.java      |   53 +-
 .../lucene/store/TestNativeFSLockFactory.java      |   57 +-
 .../org/apache/lucene/store/TestRateLimiter.java   |   83 +-
 .../lucene/store/TestSimpleFSLockFactory.java      |   16 +-
 .../store/TestSingleInstanceLockFactory.java       |   19 +-
 .../lucene/store/TestSleepingLockWrapper.java      |    9 +-
 .../lucene/store/TestStressLockFactories.java      |   74 +-
 .../lucene/store/TestTrackingDirectoryWrapper.java |    9 +-
 .../org/apache/lucene/util/BaseSortTestCase.java   |   39 +-
 .../lucene/util/StressRamUsageEstimator.java       |   15 +-
 .../org/apache/lucene/util/Test2BPagedBytes.java   |    8 +-
 .../test/org/apache/lucene/util/TestArrayUtil.java |  249 +-
 .../apache/lucene/util/TestAttributeSource.java    |  165 +-
 .../org/apache/lucene/util/TestByteBlockPool.java  |    4 +-
 .../test/org/apache/lucene/util/TestBytesRef.java  |   23 +-
 .../org/apache/lucene/util/TestBytesRefArray.java  |   27 +-
 .../org/apache/lucene/util/TestBytesRefHash.java   |  121 +-
 .../test/org/apache/lucene/util/TestCharsRef.java  |  112 +-
 .../apache/lucene/util/TestCharsRefBuilder.java    |    2 -
 .../apache/lucene/util/TestClassLoaderUtils.java   |    4 +-
 .../lucene/util/TestCloseableThreadLocal.java      |    6 +-
 .../org/apache/lucene/util/TestCollectionUtil.java |   23 +-
 .../apache/lucene/util/TestDocIdSetBuilder.java    |   17 +-
 .../org/apache/lucene/util/TestFilterIterator.java |  135 +-
 .../apache/lucene/util/TestFixedBitDocIdSet.java   |    1 -
 .../org/apache/lucene/util/TestFixedBitSet.java    |  318 +-
 .../lucene/util/TestFixedLengthBytesRefArray.java  |   41 +-
 .../util/TestFrequencyTrackingRingBuffer.java      |    5 +-
 .../test/org/apache/lucene/util/TestIOUtils.java   |   45 +-
 .../apache/lucene/util/TestInPlaceMergeSorter.java |    6 +-
 .../apache/lucene/util/TestIntArrayDocIdSet.java   |    3 -
 .../org/apache/lucene/util/TestIntroSelector.java  |   32 +-
 .../org/apache/lucene/util/TestIntroSorter.java    |    1 -
 .../test/org/apache/lucene/util/TestIntsRef.java   |   23 +-
 .../org/apache/lucene/util/TestLSBRadixSorter.java |    2 -
 .../org/apache/lucene/util/TestLongBitSet.java     |  199 +-
 .../test/org/apache/lucene/util/TestLongHeap.java  |   19 +-
 .../test/org/apache/lucene/util/TestLongsRef.java  |   22 +-
 .../org/apache/lucene/util/TestMSBRadixSorter.java |   27 +-
 .../test/org/apache/lucene/util/TestMathUtil.java  |   68 +-
 .../org/apache/lucene/util/TestMergedIterator.java |   13 +-
 .../org/apache/lucene/util/TestNamedSPILoader.java |   16 +-
 .../org/apache/lucene/util/TestNotDocIdSet.java    |    6 +-
 .../org/apache/lucene/util/TestNumericUtils.java   |  426 ++-
 .../org/apache/lucene/util/TestOfflineSorter.java  |  544 +--
 .../org/apache/lucene/util/TestPagedBytes.java     |   48 +-
 .../org/apache/lucene/util/TestPriorityQueue.java  |   52 +-
 .../org/apache/lucene/util/TestQueryBuilder.java   |  471 +--
 .../org/apache/lucene/util/TestRadixSelector.java  |   42 +-
 .../apache/lucene/util/TestRamUsageEstimator.java  |  118 +-
 .../util/TestRecyclingByteBlockAllocator.java      |   25 +-
 .../util/TestRecyclingIntBlockAllocator.java       |   27 +-
 .../apache/lucene/util/TestRoaringDocIdSet.java    |    5 +-
 .../org/apache/lucene/util/TestRollingBuffer.java  |   24 +-
 .../org/apache/lucene/util/TestSentinelIntSet.java |   31 +-
 .../test/org/apache/lucene/util/TestSetOnce.java   |   25 +-
 .../org/apache/lucene/util/TestSloppyMath.java     |  107 +-
 .../org/apache/lucene/util/TestSmallFloat.java     |   97 +-
 .../lucene/util/TestSparseFixedBitDocIdSet.java    |    2 -
 .../apache/lucene/util/TestSparseFixedBitSet.java  |    6 +-
 .../org/apache/lucene/util/TestStringHelper.java   |   41 +-
 .../lucene/util/TestStringMSBRadixSorter.java      |    4 +-
 .../test/org/apache/lucene/util/TestTimSorter.java |    3 +-
 .../apache/lucene/util/TestTimSorterWorstCase.java |   63 +-
 .../org/apache/lucene/util/TestUnicodeUtil.java    |   79 +-
 .../org/apache/lucene/util/TestVectorUtil.java     |   10 +-
 .../test/org/apache/lucene/util/TestVersion.java   |  176 +-
 .../org/apache/lucene/util/TestVirtualMethod.java  |   69 +-
 .../apache/lucene/util/TestWeakIdentityMap.java    |  197 +-
 .../util/automaton/FiniteStringsIteratorTest.java  |   53 +-
 .../LimitedFiniteStringsIteratorTest.java          |   32 +-
 .../lucene/util/automaton/TestAutomaton.java       |  982 +++---
 .../util/automaton/TestCompiledAutomaton.java      |   32 +-
 .../automaton/TestDaciukMihovAutomatonBuilder.java |   12 +-
 .../lucene/util/automaton/TestDeterminism.java     |   60 +-
 .../util/automaton/TestDeterminizeLexicon.java     |   10 +-
 .../apache/lucene/util/automaton/TestIntSet.java   |  124 +-
 .../util/automaton/TestLevenshteinAutomata.java    |  309 +-
 .../apache/lucene/util/automaton/TestMinimize.java |   29 +-
 .../lucene/util/automaton/TestOperations.java      |   81 +-
 .../apache/lucene/util/automaton/TestRegExp.java   |  136 +-
 .../lucene/util/automaton/TestUTF32ToUTF8.java     |   86 +-
 .../apache/lucene/util/bkd/Test2BBKDPoints.java    |   42 +-
 .../test/org/apache/lucene/util/bkd/TestBKD.java   | 1485 ++++----
 .../lucene/util/bkd/TestBKDRadixSelector.java      |  225 +-
 .../apache/lucene/util/bkd/TestBKDRadixSort.java   |   50 +-
 .../apache/lucene/util/bkd/TestDocIdsWriter.java   |   37 +-
 .../util/bkd/TestMutablePointsReaderUtils.java     |  116 +-
 .../apache/lucene/util/compress/LZ4TestCase.java   |  211 +-
 .../apache/lucene/util/compress/TestFastLZ4.java   |    1 -
 .../apache/lucene/util/compress/TestHighLZ4.java   |    3 +-
 .../compress/TestLowercaseAsciiCompression.java    |    7 +-
 .../test/org/apache/lucene/util/fst/Test2BFST.java |   88 +-
 .../apache/lucene/util/fst/TestBitTableUtil.java   |   79 +-
 .../org/apache/lucene/util/fst/TestBytesStore.java |  312 +-
 .../lucene/util/fst/TestFSTDirectAddressing.java   |  118 +-
 .../test/org/apache/lucene/util/fst/TestFSTs.java  |  659 ++--
 .../test/org/apache/lucene/util/fst/TestUtil.java  |   20 +-
 .../graph/TestGraphTokenStreamFiniteStrings.java   |  427 +--
 .../apache/lucene/util/hnsw/KnnGraphTester.java    |  188 +-
 .../apache/lucene/util/hnsw/MockVectorValues.java  |    6 +-
 .../test/org/apache/lucene/util/hnsw/TestHnsw.java |   88 +-
 .../org/apache/lucene/util/hnsw/TestNeighbors.java |    4 +-
 .../lucene/util/mutable/TestMutableValues.java     |   64 +-
 .../lucene/util/packed/TestDirectMonotonic.java    |   98 +-
 .../lucene/util/packed/TestDirectPacked.java       |   34 +-
 .../apache/lucene/util/packed/TestPackedInts.java  |  492 ++-
 .../lucene/index/BaseDocValuesFormatTestCase.java  |    3 +-
 1536 files changed, 92289 insertions(+), 74918 deletions(-)

diff --git a/dev-tools/scripts/checkJavadocLinks.py b/dev-tools/scripts/checkJavadocLinks.py
index 5d07e27..6eaff12 100644
--- a/dev-tools/scripts/checkJavadocLinks.py
+++ b/dev-tools/scripts/checkJavadocLinks.py
@@ -41,7 +41,7 @@ class FindHyperlinks(HTMLParser):
   def handle_starttag(self, tag, attrs):
     # NOTE: I don't think 'a' should be in here. But try debugging 
     # NumericRangeQuery.html. (Could be javadocs bug, it's a generic type...)
-    if tag not in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
+    if tag not in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a', 'dd'):
       self.stack.append(tag)
     if tag == 'a':
       id = None
@@ -79,7 +79,7 @@ class FindHyperlinks(HTMLParser):
         raise RuntimeError('couldn\'t find an href nor name in link in %s: only got these attrs: %s' % (self.baseURL, attrs))
 
   def handle_endtag(self, tag):
-    if tag in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
+    if tag in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a', 'dd'):
       return
     
     if len(self.stack) == 0:
diff --git a/gradle/generation/util.gradle b/gradle/generation/util.gradle
index 36ad86b..2ce7856 100644
--- a/gradle/generation/util.gradle
+++ b/gradle/generation/util.gradle
@@ -95,4 +95,21 @@ configure(project(":lucene:core")) {
       )
     }
   }
+
+  task regenerate() {
+    description "Regenerate any generated sources"
+    group "generation"
+
+    // Run regeneration tasks.
+    dependsOn utilGenPacked
+    dependsOn utilGenLev
+
+    // Clean up and reformat the generated sources after generation.
+    dependsOn "tidy"
+  }
+
+  // Make sure tidy runs after generation, if they're defined.
+  tasks.matching { it.name == "tidy" }.configureEach {
+    mustRunAfter utilGenPacked, utilGenLev
+  }
 }
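
A note on the wiring above: "regenerate" only aggregates the existing
generation tasks (utilGenPacked, utilGenLev) and orders "tidy" after
them; mustRunAfter is an ordering constraint, not a dependency, so it
does not itself trigger either task. Assuming the standard gradlew
wrapper at the repository root, the aggregate task would be invoked as:

    ./gradlew :lucene:core:regenerate
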
diff --git a/gradle/validation/spotless.gradle b/gradle/validation/spotless.gradle
index 8a06eef..7276c9d 100644
--- a/gradle/validation/spotless.gradle
+++ b/gradle/validation/spotless.gradle
@@ -27,7 +27,10 @@ allprojects { prj ->
 
     spotless {
       java {
-        licenseHeaderFile file("${resources}/asl-header.txt"), '^(\\s*package)'
+        // TODO: work out how to have multiple different header files (we have
+        // classes in the codebase that have original headers).
+
+        // licenseHeaderFile file("${resources}/asl-header.txt"), '^(\\s*package)'
         lineEndings 'UNIX'
         endWithNewline()
         googleJavaFormat('1.9')
@@ -35,15 +38,25 @@ allprojects { prj ->
         switch (project.path) {
           // These modules are complete - all sources scanned.
           case ":lucene:highlighter":
-            target "src/**"
-            targetExclude "**/resources/**", "**/CambridgeMA.utf8", "**/overview.html"
+            target "src/java/**", "src/test/**"
+            targetExclude "**/overview.html", "**/CambridgeMA.utf8"
             break
 
           // Partially complete.
           case ":lucene:core":
-            target "src/**/org/apache/lucene/analysis/standard/**",
-                   "src/**/org/apache/lucene/analysis/tokenattributes/**"
-            targetExclude "**/resources/**", "**/StandardTokenizerImpl.jflex", "**/StandardTokenizerImpl.java"
+            target "src/java/**", "src/test/**"
+            targetExclude "**/overview.html",
+                "**/META-INF/**",
+                "**/StandardTokenizerImpl.jflex",
+                "**/StandardTokenizerImpl.java",
+                "**/createLevAutomata.py",
+                "**/UTF32ToUTF8.py",
+                "**/gen_BulkOperation.py",
+                "**/gen_Packed64SingleBlock.py",
+                "**/makeEuroparlLineFile.py",
+                "**/wordliststopwords.txt",
+                "**/wordliststopwords_nocomment.txt",
+                "**/gen_ForUtil.py"
             break
 
           case ":lucene:analysis:common":
@@ -53,6 +66,36 @@ allprojects { prj ->
             break
 
           // All others - disable reformatting/ checks for now.
+          case ":lucene:analysis:icu":
+          case ":lucene:analysis:kuromoji":
+          case ":lucene:analysis:morfologik":
+          case ":lucene:analysis:nori":
+          case ":lucene:analysis:opennlp":
+          case ":lucene:analysis:phonetic":
+          case ":lucene:analysis:smartcn":
+          case ":lucene:analysis:stempel":
+          case ":lucene:backward-codecs":
+          case ":lucene:benchmark":
+          case ":lucene:classification":
+          case ":lucene:codecs":
+          case ":lucene:demo":
+          case ":lucene:expressions":
+          case ":lucene:facet":
+          case ":lucene:grouping":
+          case ":lucene:join":
+          case ":lucene:luke":
+          case ":lucene:memory":
+          case ":lucene:misc":
+          case ":lucene:monitor":
+          case ":lucene:queries":
+          case ":lucene:queryparser":
+          case ":lucene:replicator":
+          case ":lucene:sandbox":
+          case ":lucene:spatial3d":
+          case ":lucene:spatial-extras":
+          case ":lucene:suggest":
+          case ":lucene:test-framework":
+
           default:
             target 'non-existing/**'
             break
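
To illustrate what googleJavaFormat('1.9') does to the newly targeted
sources: generics are respaced (Map<String,String> becomes
Map<String, String>) and signatures overflowing the 100-column limit are
rewrapped with one parameter per line, as in this before/after taken
from the AbstractAnalysisFactory hunks later in this patch:

    // Before:
    public String get(Map<String,String> args, String name, Collection<String> allowedValues, String defaultVal, boolean caseSensitive) {

    // After google-java-format 1.9:
    public String get(
        Map<String, String> args,
        String name,
        Collection<String> allowedValues,
        String defaultVal,
        boolean caseSensitive) {
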
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipWriter.java b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipWriter.java
index 5316f78..fec3be5 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipWriter.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/backward_codecs/lucene50/Lucene50SkipWriter.java
@@ -32,10 +32,11 @@ import org.apache.lucene.store.IndexOutput;
  * Write skip lists with multiple levels, and support skip within block ints.
  *
  * Assume that docFreq = 28, skipInterval = blockSize = 12
- *
+ * <pre>
  *  |       block#0       | |      block#1        | |vInts|
  *  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
  *                          ^                       ^       (level 0 skip point)
+ * </pre>
  *
  * Note that skipWriter will ignore first document in block#0, since 
  * it is useless as a skip point.  Also, we'll never skip into the vInts
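
To make the restored diagram concrete: with blockSize = skipInterval = 12
and docFreq = 28, the postings split into two packed blocks plus a vInt
tail, and the two '^' level-0 skip points follow block#0 and block#1. A
small illustrative sketch of that arithmetic (not part of the class):

    public class SkipPointMath {
      public static void main(String[] args) {
        int docFreq = 28;
        int blockSize = 12; // == skipInterval
        System.out.println("packed blocks: " + docFreq / blockSize); // 2
        System.out.println("vInt tail:     " + docFreq % blockSize); // 4
        // Level-0 skip points are recorded after each full block; the
        // vInt tail (and the first doc of block#0) is never a skip target.
      }
    }
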
diff --git a/lucene/core/src/java/org/apache/lucene/LucenePackage.java b/lucene/core/src/java/org/apache/lucene/LucenePackage.java
index 02fe86e..4e44edd 100644
--- a/lucene/core/src/java/org/apache/lucene/LucenePackage.java
+++ b/lucene/core/src/java/org/apache/lucene/LucenePackage.java
@@ -16,11 +16,10 @@
  */
 package org.apache.lucene;
 
-
-/** Lucene's package information, including version. **/
+/** Lucene's package information, including version. * */
 public final class LucenePackage {
 
-  private LucenePackage() {}                      // can't construct
+  private LucenePackage() {} // can't construct
 
   /** Return Lucene's package, including version information. */
   public static Package get() {
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/AbstractAnalysisFactory.java b/lucene/core/src/java/org/apache/lucene/analysis/AbstractAnalysisFactory.java
index 2ae8e2c..a9a5842 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/AbstractAnalysisFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/AbstractAnalysisFactory.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -36,20 +35,21 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
-
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.ResourceLoader;
 import org.apache.lucene.util.ResourceLoaderAware;
 import org.apache.lucene.util.Version;
 
 /**
- * Abstract parent class for analysis factories {@link TokenizerFactory},
- * {@link TokenFilterFactory} and {@link CharFilterFactory}.
- * <p>
- * The typical lifecycle for a factory consumer is:
+ * Abstract parent class for analysis factories {@link TokenizerFactory}, {@link TokenFilterFactory}
+ * and {@link CharFilterFactory}.
+ *
+ * <p>The typical lifecycle for a factory consumer is:
+ *
  * <ol>
  *   <li>Create factory via its constructor (or via XXXFactory.forName)
- *   <li>(Optional) If the factory uses resources such as files, {@link ResourceLoaderAware#inform(ResourceLoader)} is called to initialize those resources.
+ *   <li>(Optional) If the factory uses resources such as files, {@link
+ *       ResourceLoaderAware#inform(ResourceLoader)} is called to initialize those resources.
  *   <li>Consumer calls create() to obtain instances.
  * </ol>
  */
@@ -57,19 +57,19 @@ public abstract class AbstractAnalysisFactory {
   public static final String LUCENE_MATCH_VERSION_PARAM = "luceneMatchVersion";
 
   /** The original args, before any processing */
-  private final Map<String,String> originalArgs;
+  private final Map<String, String> originalArgs;
 
   /** the luceneVersion arg */
   protected final Version luceneMatchVersion;
   /** whether the luceneMatchVersion arg is explicitly specified in the serialized schema */
   private boolean isExplicitLuceneMatchVersion = false;
-  
+
   /**
-   * This default ctor is required to be implemented by all subclasses because of
-   * service loader (SPI) specification, but it is never called by Lucene.
-   * <p>
-   * Subclass ctors should call: {@code throw defaultCtorException();}
-   * 
+   * This default ctor is required to be implemented by all subclasses because of the service
+   * loader (SPI) specification, but it is never called by Lucene.
+   *
+   * <p>Subclass ctors should call: {@code throw defaultCtorException();}
+   *
    * @throws UnsupportedOperationException if invoked
    * @see #defaultCtorException()
    * @see #AbstractAnalysisFactory(Map)
@@ -77,24 +77,23 @@ public abstract class AbstractAnalysisFactory {
   protected AbstractAnalysisFactory() {
     throw defaultCtorException();
   }
-  
+
   /**
-   * Helper method to be called from mandatory default constructor of
-   * all subclasses to make {@link ServiceLoader} happy.
-   * <p>
-   * Should be used in subclass ctors like: {@code throw defaultCtorException();}
-   * 
+   * Helper method to be called from the mandatory default constructor of all subclasses to make
+   * {@link ServiceLoader} happy.
+   *
+   * <p>Should be used in subclass ctors like: {@code throw defaultCtorException();}
+   *
    * @see #AbstractAnalysisFactory()
    */
   protected static RuntimeException defaultCtorException() {
-    return new UnsupportedOperationException("Analysis factories cannot be instantiated without arguments. " +
-        "Use applicable factory methods of TokenizerFactory, CharFilterFactory, or TokenFilterFactory.");
+    return new UnsupportedOperationException(
+        "Analysis factories cannot be instantiated without arguments. "
+            + "Use applicable factory methods of TokenizerFactory, CharFilterFactory, or TokenFilterFactory.");
   }
 
-  /**
-   * Initialize this factory via a set of key-value pairs.
-   */
-  protected AbstractAnalysisFactory(Map<String,String> args) {
+  /** Initialize this factory via a set of key-value pairs. */
+  protected AbstractAnalysisFactory(Map<String, String> args) {
     originalArgs = Map.copyOf(args);
     String version = get(args, LUCENE_MATCH_VERSION_PARAM);
     if (version == null) {
@@ -106,29 +105,35 @@ public abstract class AbstractAnalysisFactory {
         throw new IllegalArgumentException(pe);
       }
     }
-    args.remove(CLASS_NAME);  // consume the class arg
-    args.remove(SPI_NAME);    // consume the spi arg
+    args.remove(CLASS_NAME); // consume the class arg
+    args.remove(SPI_NAME); // consume the spi arg
   }
-  
-  public final Map<String,String> getOriginalArgs() {
+
+  public final Map<String, String> getOriginalArgs() {
     return originalArgs;
   }
 
   public final Version getLuceneMatchVersion() {
     return this.luceneMatchVersion;
   }
-  
-  public String require(Map<String,String> args, String name) {
+
+  public String require(Map<String, String> args, String name) {
     String s = args.remove(name);
     if (s == null) {
       throw new IllegalArgumentException("Configuration Error: missing parameter '" + name + "'");
     }
     return s;
   }
-  public String require(Map<String,String> args, String name, Collection<String> allowedValues) {
+
+  public String require(Map<String, String> args, String name, Collection<String> allowedValues) {
     return require(args, name, allowedValues, true);
   }
-  public String require(Map<String,String> args, String name, Collection<String> allowedValues, boolean caseSensitive) {
+
+  public String require(
+      Map<String, String> args,
+      String name,
+      Collection<String> allowedValues,
+      boolean caseSensitive) {
     String s = args.remove(name);
     if (s == null) {
       throw new IllegalArgumentException("Configuration Error: missing parameter '" + name + "'");
@@ -144,23 +149,35 @@ public abstract class AbstractAnalysisFactory {
           }
         }
       }
-      throw new IllegalArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
+      throw new IllegalArgumentException(
+          "Configuration Error: '" + name + "' value must be one of " + allowedValues);
     }
   }
-  public String get(Map<String,String> args, String name) {
+
+  public String get(Map<String, String> args, String name) {
     return args.remove(name); // defaultVal = null
   }
-  public String get(Map<String,String> args, String name, String defaultVal) {
+
+  public String get(Map<String, String> args, String name, String defaultVal) {
     String s = args.remove(name);
     return s == null ? defaultVal : s;
   }
-  public String get(Map<String,String> args, String name, Collection<String> allowedValues) {
+
+  public String get(Map<String, String> args, String name, Collection<String> allowedValues) {
     return get(args, name, allowedValues, null); // defaultVal = null
   }
-  public String get(Map<String,String> args, String name, Collection<String> allowedValues, String defaultVal) {
+
+  public String get(
+      Map<String, String> args, String name, Collection<String> allowedValues, String defaultVal) {
     return get(args, name, allowedValues, defaultVal, true);
   }
-  public String get(Map<String,String> args, String name, Collection<String> allowedValues, String defaultVal, boolean caseSensitive) {
+
+  public String get(
+      Map<String, String> args,
+      String name,
+      Collection<String> allowedValues,
+      String defaultVal,
+      boolean caseSensitive) {
     String s = args.remove(name);
     if (s == null) {
       return defaultVal;
@@ -176,42 +193,47 @@ public abstract class AbstractAnalysisFactory {
           }
         }
       }
-      throw new IllegalArgumentException("Configuration Error: '" + name + "' value must be one of " + allowedValues);
+      throw new IllegalArgumentException(
+          "Configuration Error: '" + name + "' value must be one of " + allowedValues);
     }
   }
 
-  protected final int requireInt(Map<String,String> args, String name) {
+  protected final int requireInt(Map<String, String> args, String name) {
     return Integer.parseInt(require(args, name));
   }
-  protected final int getInt(Map<String,String> args, String name, int defaultVal) {
+
+  protected final int getInt(Map<String, String> args, String name, int defaultVal) {
     String s = args.remove(name);
     return s == null ? defaultVal : Integer.parseInt(s);
   }
 
-  protected final boolean requireBoolean(Map<String,String> args, String name) {
+  protected final boolean requireBoolean(Map<String, String> args, String name) {
     return Boolean.parseBoolean(require(args, name));
   }
-  protected final boolean getBoolean(Map<String,String> args, String name, boolean defaultVal) {
+
+  protected final boolean getBoolean(Map<String, String> args, String name, boolean defaultVal) {
     String s = args.remove(name);
     return s == null ? defaultVal : Boolean.parseBoolean(s);
   }
 
-  protected final float requireFloat(Map<String,String> args, String name) {
+  protected final float requireFloat(Map<String, String> args, String name) {
     return Float.parseFloat(require(args, name));
   }
-  protected final float getFloat(Map<String,String> args, String name, float defaultVal) {
+
+  protected final float getFloat(Map<String, String> args, String name, float defaultVal) {
     String s = args.remove(name);
     return s == null ? defaultVal : Float.parseFloat(s);
   }
 
-  public char requireChar(Map<String,String> args, String name) {
+  public char requireChar(Map<String, String> args, String name) {
     return require(args, name).charAt(0);
   }
-  public char getChar(Map<String,String> args, String name, char defaultValue) {
+
+  public char getChar(Map<String, String> args, String name, char defaultValue) {
     String s = args.remove(name);
     if (s == null) {
       return defaultValue;
-    } else { 
+    } else {
       if (s.length() != 1) {
         throw new IllegalArgumentException(name + " should be a char. \"" + s + "\" is invalid");
       } else {
@@ -219,14 +241,14 @@ public abstract class AbstractAnalysisFactory {
       }
     }
   }
-  
+
   private static final Pattern ITEM_PATTERN = Pattern.compile("[^,\\s]+");
 
   /** Returns whitespace- and/or comma-separated set of values, or null if none are found */
-  public Set<String> getSet(Map<String,String> args, String name) {
+  public Set<String> getSet(Map<String, String> args, String name) {
     String s = args.remove(name);
     if (s == null) {
-     return null;
+      return null;
     } else {
       Set<String> set = null;
       Matcher matcher = ITEM_PATTERN.matcher(s);
@@ -241,25 +263,26 @@ public abstract class AbstractAnalysisFactory {
     }
   }
 
-  /**
-   * Compiles a pattern for the value of the specified argument key <code>name</code> 
-   */
-  protected final Pattern getPattern(Map<String,String> args, String name) {
+  /** Compiles a pattern for the value of the specified argument key <code>name</code> */
+  protected final Pattern getPattern(Map<String, String> args, String name) {
     try {
       return Pattern.compile(require(args, name));
     } catch (PatternSyntaxException e) {
-      throw new IllegalArgumentException
-        ("Configuration Error: '" + name + "' can not be parsed in " +
-         this.getClass().getSimpleName(), e);
+      throw new IllegalArgumentException(
+          "Configuration Error: '"
+              + name
+              + "' can not be parsed in "
+              + this.getClass().getSimpleName(),
+          e);
     }
   }
 
   /**
-   * Returns as {@link CharArraySet} from wordFiles, which
-   * can be a comma-separated list of filenames
+   * Returns a {@link CharArraySet} from wordFiles, which can be a comma-separated list of
+   * filenames
    */
-  protected final CharArraySet getWordSet(ResourceLoader loader,
-      String wordFiles, boolean ignoreCase) throws IOException {
+  protected final CharArraySet getWordSet(
+      ResourceLoader loader, String wordFiles, boolean ignoreCase) throws IOException {
     List<String> files = splitFileNames(wordFiles);
     CharArraySet words = null;
     if (files.size() > 0) {
@@ -273,18 +296,18 @@ public abstract class AbstractAnalysisFactory {
     }
     return words;
   }
-  
-  /**
-   * Returns the resource's lines (with content treated as UTF-8)
-   */
+
+  /** Returns the resource's lines (with content treated as UTF-8) */
   protected final List<String> getLines(ResourceLoader loader, String resource) throws IOException {
     return WordlistLoader.getLines(loader.openResource(resource), StandardCharsets.UTF_8);
   }
 
-  /** same as {@link #getWordSet(ResourceLoader, String, boolean)},
-   * except the input is in snowball format. */
-  protected final CharArraySet getSnowballWordSet(ResourceLoader loader,
-      String wordFiles, boolean ignoreCase) throws IOException {
+  /**
+   * Same as {@link #getWordSet(ResourceLoader, String, boolean)}, except the input is in snowball
+   * format.
+   */
+  protected final CharArraySet getSnowballWordSet(
+      ResourceLoader loader, String wordFiles, boolean ignoreCase) throws IOException {
     List<String> files = splitFileNames(wordFiles);
     CharArraySet words = null;
     if (files.size() > 0) {
@@ -296,9 +319,11 @@ public abstract class AbstractAnalysisFactory {
         Reader reader = null;
         try {
           stream = loader.openResource(file.trim());
-          CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder()
-              .onMalformedInput(CodingErrorAction.REPORT)
-              .onUnmappableCharacter(CodingErrorAction.REPORT);
+          CharsetDecoder decoder =
+              StandardCharsets.UTF_8
+                  .newDecoder()
+                  .onMalformedInput(CodingErrorAction.REPORT)
+                  .onUnmappableCharacter(CodingErrorAction.REPORT);
           reader = new InputStreamReader(stream, decoder);
           WordlistLoader.getSnowballWordSet(reader, words);
         } finally {
@@ -310,8 +335,8 @@ public abstract class AbstractAnalysisFactory {
   }
 
   /**
-   * Splits file names separated by comma character.
-   * File names can contain comma characters escaped by backslash '\'
+   * Splits file names separated by a comma character. File names can contain comma characters
+   * escaped by a backslash '\'
    *
    * @param fileNames the string containing file names
    * @return a list of file names with the escaping backslashed removed
@@ -321,16 +346,15 @@ public abstract class AbstractAnalysisFactory {
   }
 
   /**
-   * Splits a list separated by zero or more given separator characters.
-   * List items can contain comma characters escaped by backslash '\'.
-   * Whitespace is NOT trimmed from the returned list items.
+   * Splits a list separated by zero or more given separator characters. List items can contain
+   * comma characters escaped by backslash '\'. Whitespace is NOT trimmed from the returned list
+   * items.
    *
    * @param list the string containing the split list items
    * @return a list of items with the escaping backslashes removed
    */
   protected final List<String> splitAt(char separator, String list) {
-    if (list == null)
-      return Collections.emptyList();
+    if (list == null) return Collections.emptyList();
 
     List<String> result = new ArrayList<>();
     for (String item : list.split("(?<!\\\\)[" + separator + "]")) {
@@ -343,11 +367,12 @@ public abstract class AbstractAnalysisFactory {
   private static final String CLASS_NAME = "class";
 
   private static final String SPI_NAME = "name";
-  
+
   /**
-   * @return the string used to specify the concrete class name in a serialized representation: the class arg.  
-   *         If the concrete class name was not specified via a class arg, returns {@code getClass().getName()}.
-   */ 
+   * @return the string used to specify the concrete class name in a serialized representation: the
+   *     class arg. If the concrete class name was not specified via a class arg, returns {@code
+   *     getClass().getName()}.
+   */
   public String getClassArg() {
     if (null != originalArgs) {
       String className = originalArgs.get(CLASS_NAME);
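
The reflowed class javadoc above spells out the consumer lifecycle:
construct via XXXFactory.forName, optionally inform with a
ResourceLoader, then call create(). A minimal usage sketch of that
lifecycle; the SPI name "lowercase" and ClasspathResourceLoader are
illustrative assumptions, not taken from this patch:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.TokenFilterFactory;
    import org.apache.lucene.util.ClasspathResourceLoader;
    import org.apache.lucene.util.ResourceLoaderAware;

    public class FactoryLifecycleDemo {
      public static void main(String[] args) throws IOException {
        Map<String, String> params = new HashMap<>();
        params.put("luceneMatchVersion", "9.0.0");
        // 1. Create the factory via its SPI name.
        TokenFilterFactory factory = TokenFilterFactory.forName("lowercase", params);
        // 2. (Optional) Let resource-based factories load their files.
        if (factory instanceof ResourceLoaderAware) {
          ((ResourceLoaderAware) factory)
              .inform(new ClasspathResourceLoader(FactoryLifecycleDemo.class));
        }
        // 3. Consumer calls create() to obtain instances, e.g.
        //    factory.create(tokenStream).
        System.out.println("Loaded " + factory.getClass().getName());
      }
    }
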
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/AnalysisSPILoader.java b/lucene/core/src/java/org/apache/lucene/analysis/AnalysisSPILoader.java
index 47f7f27..e39699e 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/AnalysisSPILoader.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/AnalysisSPILoader.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Modifier;
@@ -30,16 +29,16 @@ import java.util.ServiceConfigurationError;
 import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.regex.Pattern;
-
 import org.apache.lucene.util.ClassLoaderUtils;
 
 /**
  * Helper class for loading named SPIs from classpath (e.g. Tokenizers, TokenStreams).
+ *
  * @lucene.internal
  */
 public final class AnalysisSPILoader<S extends AbstractAnalysisFactory> {
 
-  private volatile Map<String,Class<? extends S>> services = Collections.emptyMap();
+  private volatile Map<String, Class<? extends S>> services = Collections.emptyMap();
   private volatile Set<String> originalNames = Collections.emptySet();
   private final Class<S> clazz;
 
@@ -56,65 +55,77 @@ public final class AnalysisSPILoader<S extends AbstractAnalysisFactory> {
     if (classloader == null) {
       classloader = clazzClassloader;
     }
-    if (clazzClassloader != null && !ClassLoaderUtils.isParentClassLoader(clazzClassloader, classloader)) {
+    if (clazzClassloader != null
+        && !ClassLoaderUtils.isParentClassLoader(clazzClassloader, classloader)) {
       reload(clazzClassloader);
     }
     reload(classloader);
   }
 
-  /** 
-   * Reloads the internal SPI list from the given {@link ClassLoader}.
-   * Changes to the service list are visible after the method ends, all
-   * iterators (e.g., from {@link #availableServices()},...) stay consistent. 
-   * 
-   * <p><b>NOTE:</b> Only new service providers are added, existing ones are
-   * never removed or replaced.
-   * 
-   * <p><em>This method is expensive and should only be called for discovery
-   * of new service providers on the given classpath/classloader!</em>
+  /**
+   * Reloads the internal SPI list from the given {@link ClassLoader}. Changes to the service list
+   * are visible after the method ends; all iterators (e.g., from {@link #availableServices()}, ...)
+   * stay consistent.
+   *
+   * <p><b>NOTE:</b> Only new service providers are added, existing ones are never removed or
+   * replaced.
+   *
+   * <p><em>This method is expensive and should only be called for discovery of new service
+   * providers on the given classpath/classloader!</em>
    */
   public synchronized void reload(ClassLoader classloader) {
     Objects.requireNonNull(classloader, "classloader");
-    final LinkedHashMap<String,Class<? extends S>> services = new LinkedHashMap<>(this.services);
+    final LinkedHashMap<String, Class<? extends S>> services = new LinkedHashMap<>(this.services);
     final LinkedHashSet<String> originalNames = new LinkedHashSet<>(this.originalNames);
-    ServiceLoader.load(clazz, classloader).stream().map(ServiceLoader.Provider::type).forEachOrdered(service -> {
-      String name = null;
-      String originalName = null;
-      Throwable cause = null;
-      try {
-        originalName = lookupSPIName(service);
-        name = originalName.toLowerCase(Locale.ROOT);
-        if (!isValidName(originalName)) {
-          throw new ServiceConfigurationError("The name " + originalName + " for " + service.getName() +
-              " is invalid: Allowed characters are (English) alphabet, digits, and underscore. It should be started with an alphabet.");
-        }
-      } catch (NoSuchFieldException | IllegalAccessException | IllegalStateException e) {
-        cause = e;
-      }
-      if (name == null) {
-        throw new ServiceConfigurationError("The class name " + service.getName() +
-            " has no service name field: [public static final String NAME]", cause);
-      }
-      // only add the first one for each name, later services will be ignored
-      // this allows to place services before others in classpath to make 
-      // them used instead of others
-      //
-      // TODO: Should we disallow duplicate names here?
-      // Allowing it may get confusing on collisions, as different packages
-      // could contain same factory class, which is a naming bug!
-      // When changing this be careful to allow reload()!
-      if (!services.containsKey(name)) {
-        services.put(name, service);
-        // preserve (case-sensitive) original name for reference
-        originalNames.add(originalName);
-      }
-    });
+    ServiceLoader.load(clazz, classloader).stream()
+        .map(ServiceLoader.Provider::type)
+        .forEachOrdered(
+            service -> {
+              String name = null;
+              String originalName = null;
+              Throwable cause = null;
+              try {
+                originalName = lookupSPIName(service);
+                name = originalName.toLowerCase(Locale.ROOT);
+                if (!isValidName(originalName)) {
+                  throw new ServiceConfigurationError(
+                      "The name "
+                          + originalName
+                          + " for "
+                          + service.getName()
+                          + " is invalid: Allowed characters are (English) alphabet, digits, and underscore. It should be started with an alphabet.");
+                }
+              } catch (NoSuchFieldException | IllegalAccessException | IllegalStateException e) {
+                cause = e;
+              }
+              if (name == null) {
+                throw new ServiceConfigurationError(
+                    "The class name "
+                        + service.getName()
+                        + " has no service name field: [public static final String NAME]",
+                    cause);
+              }
+              // only add the first one for each name, later services will be ignored
+              // this allows to place services before others in classpath to make
+              // them used instead of others
+              //
+              // TODO: Should we disallow duplicate names here?
+              // Allowing it may get confusing on collisions, as different packages
+              // could contain same factory class, which is a naming bug!
+              // When changing this be careful to allow reload()!
+              if (!services.containsKey(name)) {
+                services.put(name, service);
+                // preserve (case-sensitive) original name for reference
+                originalNames.add(originalName);
+              }
+            });
 
     // make sure that the number of lookup keys is same to the number of original names.
     // in fact this constraint should be met in existence checks of the lookup map key,
     // so this is more like an assertion rather than a status check.
     if (services.keySet().size() != originalNames.size()) {
-      throw new ServiceConfigurationError("Service lookup key set is inconsistent with original name set!");
+      throw new ServiceConfigurationError(
+          "Service lookup key set is inconsistent with original name set!");
     }
 
     this.services = Map.copyOf(services);
@@ -125,46 +136,61 @@ public final class AnalysisSPILoader<S extends AbstractAnalysisFactory> {
     return SERVICE_NAME_PATTERN.matcher(name).matches();
   }
 
-  public S newInstance(String name, Map<String,String> args) {
+  public S newInstance(String name, Map<String, String> args) {
     final Class<? extends S> service = lookupClass(name);
     return newFactoryClassInstance(service, args);
   }
-  
+
   public Class<? extends S> lookupClass(String name) {
     final Class<? extends S> service = services.get(name.toLowerCase(Locale.ROOT));
     if (service != null) {
       return service;
     } else {
-      throw new IllegalArgumentException("A SPI class of type "+clazz.getName()+" with name '"+name+"' does not exist. "+
-          "You need to add the corresponding JAR file supporting this SPI to your classpath. "+
-          "The current classpath supports the following names: "+availableServices());
+      throw new IllegalArgumentException(
+          "A SPI class of type "
+              + clazz.getName()
+              + " with name '"
+              + name
+              + "' does not exist. "
+              + "You need to add the corresponding JAR file supporting this SPI to your classpath. "
+              + "The current classpath supports the following names: "
+              + availableServices());
     }
   }
 
   public Set<String> availableServices() {
     return originalNames;
-  }  
+  }
 
   /**
-   * Looks up SPI name (static "NAME" field) with appropriate modifiers.
-   * Also it must be a String class and declared in the concrete class.
+   * Looks up the SPI name (static "NAME" field) with appropriate modifiers. The field must also
+   * be a String, declared in the concrete class.
+   *
    * @return the SPI name
    * @throws NoSuchFieldException - if the "NAME" field is not defined.
    * @throws IllegalAccessException - if the "NAME" field is inaccessible.
-   * @throws IllegalStateException - if the "NAME" field does not have appropriate modifiers or isn't a String field.
+   * @throws IllegalStateException - if the "NAME" field does not have appropriate modifiers or
+   *     isn't a String field.
    */
-  public static String lookupSPIName(Class<? extends AbstractAnalysisFactory> service) throws NoSuchFieldException, IllegalAccessException, IllegalStateException {
+  public static String lookupSPIName(Class<? extends AbstractAnalysisFactory> service)
+      throws NoSuchFieldException, IllegalAccessException, IllegalStateException {
     final Field field = service.getDeclaredField("NAME");
     int modifier = field.getModifiers();
-    if (Modifier.isPublic(modifier) && Modifier.isStatic(modifier) &&
-        Modifier.isFinal(modifier) && Objects.equals(field.getType(), String.class)) {
+    if (Modifier.isPublic(modifier)
+        && Modifier.isStatic(modifier)
+        && Modifier.isFinal(modifier)
+        && Objects.equals(field.getType(), String.class)) {
       return ((String) field.get(null));
     }
     throw new IllegalStateException("No SPI name defined.");
   }
-  
-  /** Creates a new instance of the given {@link AbstractAnalysisFactory} by invoking the constructor, passing the given argument map. */
-  public static <T extends AbstractAnalysisFactory> T newFactoryClassInstance(Class<T> clazz, Map<String,String> args) {
+
+  /**
+   * Creates a new instance of the given {@link AbstractAnalysisFactory} by invoking the
+   * constructor, passing the given argument map.
+   */
+  public static <T extends AbstractAnalysisFactory> T newFactoryClassInstance(
+      Class<T> clazz, Map<String, String> args) {
     try {
       return clazz.getConstructor(Map.class).newInstance(args);
     } catch (InvocationTargetException ite) {
@@ -175,9 +201,14 @@ public final class AnalysisSPILoader<S extends AbstractAnalysisFactory> {
       if (cause instanceof Error) {
         throw (Error) cause;
       }
-      throw new RuntimeException("Unexpected checked exception while calling constructor of "+clazz.getName(), cause);
+      throw new RuntimeException(
+          "Unexpected checked exception while calling constructor of " + clazz.getName(), cause);
     } catch (ReflectiveOperationException e) {
-      throw new UnsupportedOperationException("Factory "+clazz.getName()+" cannot be instantiated. This is likely due to missing Map<String,String> constructor.", e);
+      throw new UnsupportedOperationException(
+          "Factory "
+              + clazz.getName()
+              + " cannot be instantiated. This is likely due to missing Map<String,String> constructor.",
+          e);
     }
   }
 }
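
For context on how these reformatted helpers are meant to be used, a minimal sketch follows. It
assumes the analyzers-common module is on the classpath (it provides StandardTokenizerFactory,
whose "public static final String NAME" field satisfies lookupSPIName); the demo class name is
illustrative.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.AnalysisSPILoader;
    import org.apache.lucene.analysis.standard.StandardTokenizerFactory;

    public class SpiNameDemo {
      public static void main(String[] args) throws Exception {
        // lookupSPIName requires a public static final String NAME field on the
        // concrete factory class; anything else throws IllegalStateException.
        String name = AnalysisSPILoader.lookupSPIName(StandardTokenizerFactory.class);
        System.out.println(name); // expected: "standard"

        // Factories are instantiated through their Map<String,String> constructor.
        Map<String, String> factoryArgs = new HashMap<>();
        StandardTokenizerFactory factory =
            AnalysisSPILoader.newFactoryClassInstance(StandardTokenizerFactory.class, factoryArgs);
        System.out.println(factory.getClass().getSimpleName());
      }
    }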
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
index 6d57bd8..61c63ac 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.Reader;
@@ -24,7 +23,6 @@ import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.function.Consumer;
-
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
@@ -35,14 +33,15 @@ import org.apache.lucene.util.CloseableThreadLocal;
 import org.apache.lucene.util.Version;
 
 /**
- * An Analyzer builds TokenStreams, which analyze text.  It thus represents a
- * policy for extracting index terms from text.
- * <p>
- * In order to define what analysis is done, subclasses must define their
- * {@link TokenStreamComponents TokenStreamComponents} in {@link #createComponents(String)}.
- * The components are then reused in each call to {@link #tokenStream(String, Reader)}.
- * <p>
- * Simple example:
+ * An Analyzer builds TokenStreams, which analyze text. It thus represents a policy for extracting
+ * index terms from text.
+ *
+ * <p>In order to define what analysis is done, subclasses must define their {@link
+ * TokenStreamComponents TokenStreamComponents} in {@link #createComponents(String)}. The components
+ * are then reused in each call to {@link #tokenStream(String, Reader)}.
+ *
+ * <p>Simple example:
+ *
  * <pre class="prettyprint">
  * Analyzer analyzer = new Analyzer() {
  *  {@literal @Override}
@@ -60,24 +59,26 @@ import org.apache.lucene.util.Version;
  *   }
  * };
  * </pre>
+ *
  * For more examples, see the {@link org.apache.lucene.analysis Analysis package documentation}.
- * <p>
- * For some concrete implementations bundled with Lucene, look in the analysis modules:
+ *
+ * <p>For some concrete implementations bundled with Lucene, look in the analysis modules:
+ *
  * <ul>
- *   <li><a href="{@docRoot}/../analysis/common/overview-summary.html">Common</a>:
- *       Analyzers for indexing content in different languages and domains.
- *   <li><a href="{@docRoot}/../analysis/icu/overview-summary.html">ICU</a>:
- *       Exposes functionality from ICU to Apache Lucene. 
- *   <li><a href="{@docRoot}/../analysis/kuromoji/overview-summary.html">Kuromoji</a>:
- *       Morphological analyzer for Japanese text.
+ *   <li><a href="{@docRoot}/../analysis/common/overview-summary.html">Common</a>: Analyzers for
+ *       indexing content in different languages and domains.
+ *   <li><a href="{@docRoot}/../analysis/icu/overview-summary.html">ICU</a>: Exposes functionality
+ *       from ICU to Apache Lucene.
+ *   <li><a href="{@docRoot}/../analysis/kuromoji/overview-summary.html">Kuromoji</a>: Morphological
+ *       analyzer for Japanese text.
  *   <li><a href="{@docRoot}/../analysis/morfologik/overview-summary.html">Morfologik</a>:
  *       Dictionary-driven lemmatization for the Polish language.
- *   <li><a href="{@docRoot}/../analysis/phonetic/overview-summary.html">Phonetic</a>:
- *       Analysis for indexing phonetic signatures (for sounds-alike search).
- *   <li><a href="{@docRoot}/../analysis/smartcn/overview-summary.html">Smart Chinese</a>:
- *       Analyzer for Simplified Chinese, which indexes words.
- *   <li><a href="{@docRoot}/../analysis/stempel/overview-summary.html">Stempel</a>:
- *       Algorithmic Stemmer for the Polish Language.
+ *   <li><a href="{@docRoot}/../analysis/phonetic/overview-summary.html">Phonetic</a>: Analysis for
+ *       indexing phonetic signatures (for sounds-alike search).
+ *   <li><a href="{@docRoot}/../analysis/smartcn/overview-summary.html">Smart Chinese</a>: Analyzer
+ *       for Simplified Chinese, which indexes words.
+ *   <li><a href="{@docRoot}/../analysis/stempel/overview-summary.html">Stempel</a>: Algorithmic
+ *       Stemmer for the Polish Language.
  * </ul>
  *
  * @since 3.1
@@ -87,12 +88,13 @@ public abstract class Analyzer implements Closeable {
   private final ReuseStrategy reuseStrategy;
   private Version version = Version.LATEST;
 
-  // non final as it gets nulled if closed; pkg private for access by ReuseStrategy's final helper methods:
+  // non final as it gets nulled if closed; pkg private for access by ReuseStrategy's final helper
+  // methods:
   CloseableThreadLocal<Object> storedValue = new CloseableThreadLocal<>();
 
   /**
-   * Create a new Analyzer, reusing the same set of components per-thread
-   * across calls to {@link #tokenStream(String, Reader)}. 
+   * Create a new Analyzer, reusing the same set of components per-thread across calls to {@link
+   * #tokenStream(String, Reader)}.
    */
   protected Analyzer() {
     this(GLOBAL_REUSE_STRATEGY);
@@ -100,10 +102,10 @@ public abstract class Analyzer implements Closeable {
 
   /**
    * Expert: create a new Analyzer with a custom {@link ReuseStrategy}.
-   * <p>
-   * NOTE: if you just want to reuse on a per-field basis, it's easier to
-   * use a subclass of {@link AnalyzerWrapper} such as 
-   * <a href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.html">
+   *
+   * <p>NOTE: if you just want to reuse on a per-field basis, it's easier to use a subclass of
+   * {@link AnalyzerWrapper} such as <a
+   * href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.html">
    * PerFieldAnalyzerWrapper</a> instead.
    */
   protected Analyzer(ReuseStrategy reuseStrategy) {
@@ -112,51 +114,45 @@ public abstract class Analyzer implements Closeable {
 
   /**
    * Creates a new {@link TokenStreamComponents} instance for this analyzer.
-   * 
-   * @param fieldName
-   *          the name of the fields content passed to the
-   *          {@link TokenStreamComponents} sink as a reader
-
+   *
+   * @param fieldName the name of the field whose content is passed to the {@link
+   *     TokenStreamComponents} sink as a reader
    * @return the {@link TokenStreamComponents} for this analyzer.
    */
   protected abstract TokenStreamComponents createComponents(String fieldName);
 
   /**
-   * Wrap the given {@link TokenStream} in order to apply normalization filters.
-   * The default implementation returns the {@link TokenStream} as-is. This is
-   * used by {@link #normalize(String, String)}.
+   * Wrap the given {@link TokenStream} in order to apply normalization filters. The default
+   * implementation returns the {@link TokenStream} as-is. This is used by {@link #normalize(String,
+   * String)}.
    */
   protected TokenStream normalize(String fieldName, TokenStream in) {
     return in;
   }
 
   /**
-   * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing
-   * the contents of <code>reader</code>.
-   * <p>
-   * This method uses {@link #createComponents(String)} to obtain an
-   * instance of {@link TokenStreamComponents}. It returns the sink of the
-   * components and stores the components internally. Subsequent calls to this
-   * method will reuse the previously stored components after resetting them
-   * through {@link TokenStreamComponents#setReader(Reader)}.
-   * <p>
-   * <b>NOTE:</b> After calling this method, the consumer must follow the 
-   * workflow described in {@link TokenStream} to properly consume its contents.
-   * See the {@link org.apache.lucene.analysis Analysis package documentation} for
-   * some examples demonstrating this.
-   * 
-   * <b>NOTE:</b> If your data is available as a {@code String}, use
-   * {@link #tokenStream(String, String)} which reuses a {@code StringReader}-like
-   * instance internally.
-   * 
+   * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing the contents of <code>
+   * reader</code>.
+   *
+   * <p>This method uses {@link #createComponents(String)} to obtain an instance of {@link
+   * TokenStreamComponents}. It returns the sink of the components and stores the components
+   * internally. Subsequent calls to this method will reuse the previously stored components after
+   * resetting them through {@link TokenStreamComponents#setReader(Reader)}.
+   *
+   * <p><b>NOTE:</b> After calling this method, the consumer must follow the workflow described in
+   * {@link TokenStream} to properly consume its contents. See the {@link org.apache.lucene.analysis
+   * Analysis package documentation} for some examples demonstrating this.
+   *
+   * <p><b>NOTE:</b> If your data is available as a {@code String}, use {@link #tokenStream(String,
+   * String)} which reuses a {@code StringReader}-like instance internally.
+   *
    * @param fieldName the name of the field the created TokenStream is used for
    * @param reader the reader the streams source reads from
    * @return TokenStream for iterating the analyzed content of <code>reader</code>
    * @throws AlreadyClosedException if the Analyzer is closed.
    * @see #tokenStream(String, String)
    */
-  public final TokenStream tokenStream(final String fieldName,
-                                       final Reader reader) {
+  public final TokenStream tokenStream(final String fieldName, final Reader reader) {
     TokenStreamComponents components = reuseStrategy.getReusableComponents(this, fieldName);
     final Reader r = initReader(fieldName, reader);
     if (components == null) {
@@ -166,22 +162,20 @@ public abstract class Analyzer implements Closeable {
     components.setReader(r);
     return components.getTokenStream();
   }
-  
+
   /**
-   * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing
-   * the contents of <code>text</code>.
-   * <p>
-   * This method uses {@link #createComponents(String)} to obtain an
-   * instance of {@link TokenStreamComponents}. It returns the sink of the
-   * components and stores the components internally. Subsequent calls to this
-   * method will reuse the previously stored components after resetting them
-   * through {@link TokenStreamComponents#setReader(Reader)}.
-   * <p>
-   * <b>NOTE:</b> After calling this method, the consumer must follow the 
-   * workflow described in {@link TokenStream} to properly consume its contents.
-   * See the {@link org.apache.lucene.analysis Analysis package documentation} for
-   * some examples demonstrating this.
-   * 
+   * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing the contents of <code>
+   * text</code>.
+   *
+   * <p>This method uses {@link #createComponents(String)} to obtain an instance of {@link
+   * TokenStreamComponents}. It returns the sink of the components and stores the components
+   * internally. Subsequent calls to this method will reuse the previously stored components after
+   * resetting them through {@link TokenStreamComponents#setReader(Reader)}.
+   *
+   * <p><b>NOTE:</b> After calling this method, the consumer must follow the workflow described in
+   * {@link TokenStream} to properly consume its contents. See the {@link org.apache.lucene.analysis
+   * Analysis package documentation} for some examples demonstrating this.
+   *
    * @param fieldName the name of the field the created TokenStream is used for
    * @param text the String the streams source reads from
    * @return TokenStream for iterating the analyzed content of <code>text</code>
@@ -190,9 +184,11 @@ public abstract class Analyzer implements Closeable {
    */
   public final TokenStream tokenStream(final String fieldName, final String text) {
     TokenStreamComponents components = reuseStrategy.getReusableComponents(this, fieldName);
-    @SuppressWarnings("resource") final ReusableStringReader strReader = 
-        (components == null || components.reusableStringReader == null) ?
-        new ReusableStringReader() : components.reusableStringReader;
+    @SuppressWarnings("resource")
+    final ReusableStringReader strReader =
+        (components == null || components.reusableStringReader == null)
+            ? new ReusableStringReader()
+            : components.reusableStringReader;
     strReader.setValue(text);
     final Reader r = initReader(fieldName, strReader);
     if (components == null) {
@@ -206,18 +202,15 @@ public abstract class Analyzer implements Closeable {
   }
 
   /**
-   * Normalize a string down to the representation that it would have in the
-   * index.
-   * <p>
-   * This is typically used by query parsers in order to generate a query on
-   * a given term, without tokenizing or stemming, which are undesirable if
-   * the string to analyze is a partial word (eg. in case of a wildcard or
-   * fuzzy query).
-   * <p>
-   * This method uses {@link #initReaderForNormalization(String, Reader)} in
-   * order to apply necessary character-level normalization and then
-   * {@link #normalize(String, TokenStream)} in order to apply the normalizing
-   * token filters.
+   * Normalize a string down to the representation that it would have in the index.
+   *
+   * <p>This is typically used by query parsers in order to generate a query on a given term,
+   * without tokenizing or stemming, which are undesirable if the string to analyze is a partial
+   * word (e.g. in the case of a wildcard or fuzzy query).
+   *
+   * <p>This method uses {@link #initReaderForNormalization(String, Reader)} in order to apply
+   * necessary character-level normalization and then {@link #normalize(String, TokenStream)} in
+   * order to apply the normalizing token filters.
    */
   public final BytesRef normalize(final String fieldName, final String text) {
     try {
@@ -227,7 +220,7 @@ public abstract class Analyzer implements Closeable {
         Reader filterReader = initReaderForNormalization(fieldName, reader);
         char[] buffer = new char[64];
         StringBuilder builder = new StringBuilder();
-        for (;;) {
+        for (; ; ) {
           final int read = filterReader.read(buffer, 0, buffer.length);
           if (read == -1) {
             break;
@@ -240,20 +233,29 @@ public abstract class Analyzer implements Closeable {
       }
 
       final AttributeFactory attributeFactory = attributeFactory(fieldName);
-      try (TokenStream ts = normalize(fieldName,
-          new StringTokenStream(attributeFactory, filteredText, text.length()))) {
+      try (TokenStream ts =
+          normalize(
+              fieldName, new StringTokenStream(attributeFactory, filteredText, text.length()))) {
         final TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
         ts.reset();
         if (ts.incrementToken() == false) {
-          throw new IllegalStateException("The normalization token stream is "
-              + "expected to produce exactly 1 token, but got 0 for analyzer "
-              + this + " and input \"" + text + "\"");
+          throw new IllegalStateException(
+              "The normalization token stream is "
+                  + "expected to produce exactly 1 token, but got 0 for analyzer "
+                  + this
+                  + " and input \""
+                  + text
+                  + "\"");
         }
         final BytesRef term = BytesRef.deepCopyOf(termAtt.getBytesRef());
         if (ts.incrementToken()) {
-          throw new IllegalStateException("The normalization token stream is "
-              + "expected to produce exactly 1 token, but got 2+ for analyzer "
-              + this + " and input \"" + text + "\"");
+          throw new IllegalStateException(
+              "The normalization token stream is "
+                  + "expected to produce exactly 1 token, but got 2+ for analyzer "
+                  + this
+                  + " and input \""
+                  + text
+                  + "\"");
         }
         ts.end();
         return term;
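
To make the normalization path above concrete, here is a minimal sketch of an Analyzer whose
normalize(String, TokenStream) applies the same case folding as its main chain. WhitespaceTokenizer
is assumed to come from the analyzers-common module; the field name and input are illustrative.

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.util.BytesRef;

    public class NormalizeDemo {
      public static void main(String[] args) throws IOException {
        try (Analyzer analyzer =
            new Analyzer() {
              @Override
              protected TokenStreamComponents createComponents(String fieldName) {
                WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
                return new TokenStreamComponents(tokenizer, new LowerCaseFilter(tokenizer));
              }

              @Override
              protected TokenStream normalize(String fieldName, TokenStream in) {
                // Same case folding as the main chain, but without tokenization.
                return new LowerCaseFilter(in);
              }
            }) {
          BytesRef term = analyzer.normalize("title", "Wi");
          System.out.println(term.utf8ToString()); // expected: "wi"
        }
      }
    }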
@@ -265,10 +267,9 @@ public abstract class Analyzer implements Closeable {
 
   /**
    * Override this if you want to add a CharFilter chain.
-   * <p>
-   * The default implementation returns <code>reader</code>
-   * unchanged.
-   * 
+   *
+   * <p>The default implementation returns <code>reader</code> unchanged.
+   *
    * @param fieldName IndexableField name being indexed
    * @param reader original Reader
    * @return reader, optionally decorated with CharFilter(s)
@@ -277,72 +278,63 @@ public abstract class Analyzer implements Closeable {
     return reader;
   }
 
-  /** Wrap the given {@link Reader} with {@link CharFilter}s that make sense
-   *  for normalization. This is typically a subset of the {@link CharFilter}s
-   *  that are applied in {@link #initReader(String, Reader)}. This is used by
-   *  {@link #normalize(String, String)}. */
+  /**
+   * Wrap the given {@link Reader} with {@link CharFilter}s that make sense for normalization. This
+   * is typically a subset of the {@link CharFilter}s that are applied in {@link #initReader(String,
+   * Reader)}. This is used by {@link #normalize(String, String)}.
+   */
   protected Reader initReaderForNormalization(String fieldName, Reader reader) {
     return reader;
   }
 
-  /** Return the {@link AttributeFactory} to be used for
-   *  {@link #tokenStream analysis} and
-   *  {@link #normalize(String, String) normalization} on the given
-   *  {@code FieldName}. The default implementation returns
-   *  {@link TokenStream#DEFAULT_TOKEN_ATTRIBUTE_FACTORY}. */
+  /**
+   * Return the {@link AttributeFactory} to be used for {@link #tokenStream analysis} and {@link
+   * #normalize(String, String) normalization} on the given {@code FieldName}. The default
+   * implementation returns {@link TokenStream#DEFAULT_TOKEN_ATTRIBUTE_FACTORY}.
+   */
   protected AttributeFactory attributeFactory(String fieldName) {
     return TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY;
   }
 
   /**
-   * Invoked before indexing a IndexableField instance if
-   * terms have already been added to that field.  This allows custom
-   * analyzers to place an automatic position increment gap between
-   * IndexbleField instances using the same field name.  The default value
-   * position increment gap is 0.  With a 0 position increment gap and
-   * the typical default token position increment of 1, all terms in a field,
-   * including across IndexableField instances, are in successive positions, allowing
-   * exact PhraseQuery matches, for instance, across IndexableField instance boundaries.
+   * Invoked before indexing an IndexableField instance if terms have already been added to that
+   * field. This allows custom analyzers to place an automatic position increment gap between
+   * IndexableField instances using the same field name. The default position increment gap is 0.
+   * With a 0 position increment gap and the typical default token position increment of 1, all
+   * terms in a field, including across IndexableField instances, are in successive positions,
+   * allowing exact PhraseQuery matches, for instance, across IndexableField instance boundaries.
    *
    * @param fieldName IndexableField name being indexed.
-   * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)}.
-   *         This value must be {@code >= 0}.
+   * @return position increment gap, added to the next token emitted from {@link
+   *     #tokenStream(String,Reader)}. This value must be {@code >= 0}.
    */
   public int getPositionIncrementGap(String fieldName) {
     return 0;
   }
 
   /**
-   * Just like {@link #getPositionIncrementGap}, except for
-   * Token offsets instead.  By default this returns 1.
-   * This method is only called if the field
-   * produced at least one token for indexing.
+   * Just like {@link #getPositionIncrementGap}, except for Token offsets. By default this
+   * returns 1. This method is only called if the field produced at least one token for indexing.
    *
    * @param fieldName the field just indexed
    * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)}.
-   *         This value must be {@code >= 0}.
+   *     This value must be {@code >= 0}.
    */
   public int getOffsetGap(String fieldName) {
     return 1;
   }
 
-  /**
-   * Returns the used {@link ReuseStrategy}.
-   */
+  /** Returns the used {@link ReuseStrategy}. */
   public final ReuseStrategy getReuseStrategy() {
     return reuseStrategy;
   }
 
-  /**
-   * Set the version of Lucene this analyzer should mimic the behavior for for analysis.
-   */
+  /** Set the version of Lucene this analyzer should mimic the behavior of for analysis. */
   public void setVersion(Version v) {
     version = v; // TODO: make write once?
   }
 
-  /**
-   * Return the version of Lucene this analyzer will mimic the behavior of for analysis.
-   */
+  /** Return the version of Lucene this analyzer will mimic the behavior of for analysis. */
   public Version getVersion() {
     return version;
   }
@@ -357,62 +349,54 @@ public abstract class Analyzer implements Closeable {
   }
 
   /**
-   * This class encapsulates the outer components of a token stream. It provides
-   * access to the source (a {@link Reader} {@link Consumer} and the outer end (sink), an
-   * instance of {@link TokenFilter} which also serves as the
-   * {@link TokenStream} returned by
-   * {@link Analyzer#tokenStream(String, Reader)}.
+   * This class encapsulates the outer components of a token stream. It provides access to the
+   * source (a {@link Reader} {@link Consumer}) and the outer end (sink), an instance of {@link
+   * TokenFilter} which also serves as the {@link TokenStream} returned by {@link
+   * Analyzer#tokenStream(String, Reader)}.
    */
   public static final class TokenStreamComponents {
-    /**
-     * Original source of the tokens.
-     */
+    /** Original source of the tokens. */
     protected final Consumer<Reader> source;
     /**
-     * Sink tokenstream, such as the outer tokenfilter decorating
-     * the chain. This can be the source if there are no filters.
+     * Sink tokenstream, such as the outer tokenfilter decorating the chain. This can be the source
+     * if there are no filters.
      */
     protected final TokenStream sink;
-    
+
     /** Internal cache only used by {@link Analyzer#tokenStream(String, String)}. */
     transient ReusableStringReader reusableStringReader;
 
     /**
      * Creates a new {@link TokenStreamComponents} instance.
-     * 
-     * @param source
-     *          the source to set the reader on
-     * @param result
-     *          the analyzer's resulting token stream
+     *
+     * @param source the source to set the reader on
+     * @param result the analyzer's resulting token stream
      */
-    public TokenStreamComponents(final Consumer<Reader> source,
-        final TokenStream result) {
+    public TokenStreamComponents(final Consumer<Reader> source, final TokenStream result) {
       this.source = source;
       this.sink = result;
     }
 
     /**
      * Creates a new {@link TokenStreamComponents} instance
+     *
      * @param tokenizer the analyzer's Tokenizer
-     * @param result    the analyzer's resulting token stream
+     * @param result the analyzer's resulting token stream
      */
     public TokenStreamComponents(final Tokenizer tokenizer, final TokenStream result) {
       this(tokenizer::setReader, result);
     }
 
-    /**
-     * Creates a new {@link TokenStreamComponents} from a Tokenizer
-     */
+    /** Creates a new {@link TokenStreamComponents} from a Tokenizer */
     public TokenStreamComponents(final Tokenizer tokenizer) {
       this(tokenizer::setReader, tokenizer);
     }
 
     /**
-     * Resets the encapsulated components with the given reader. If the components
-     * cannot be reset, an Exception should be thrown.
-     * 
-     * @param reader
-     *          a reader to reset the source component
+     * Resets the encapsulated components with the given reader. If the components cannot be reset,
+     * an Exception should be thrown.
+     *
+     * @param reader a reader to reset the source component
      */
     private void setReader(final Reader reader) {
       source.accept(reader);
@@ -420,26 +404,24 @@ public abstract class Analyzer implements Closeable {
 
     /**
      * Returns the sink {@link TokenStream}
-     * 
+     *
      * @return the sink {@link TokenStream}
      */
     public TokenStream getTokenStream() {
       return sink;
     }
 
-    /**
-     * Returns the component's source
-     */
+    /** Returns the component's source */
     public Consumer<Reader> getSource() {
       return source;
     }
   }
 
   /**
-   * Strategy defining how TokenStreamComponents are reused per call to
-   * {@link Analyzer#tokenStream(String, java.io.Reader)}.
+   * Strategy defining how TokenStreamComponents are reused per call to {@link
+   * Analyzer#tokenStream(String, java.io.Reader)}.
    */
-  public static abstract class ReuseStrategy {
+  public abstract static class ReuseStrategy {
     /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
     // Explicitly declared so that we have non-empty javadoc
     protected ReuseStrategy() {}
@@ -447,24 +429,25 @@ public abstract class Analyzer implements Closeable {
     /**
      * Gets the reusable TokenStreamComponents for the field with the given name.
      *
-     * @param analyzer Analyzer from which to get the reused components. Use
-     *        {@link #getStoredValue(Analyzer)} and {@link #setStoredValue(Analyzer, Object)}
-     *        to access the data on the Analyzer.
-     * @param fieldName Name of the field whose reusable TokenStreamComponents
-     *        are to be retrieved
-     * @return Reusable TokenStreamComponents for the field, or {@code null}
-     *         if there was no previous components for the field
+     * @param analyzer Analyzer from which to get the reused components. Use {@link
+     *     #getStoredValue(Analyzer)} and {@link #setStoredValue(Analyzer, Object)} to access the
+     *     data on the Analyzer.
+     * @param fieldName Name of the field whose reusable TokenStreamComponents are to be retrieved
+     * @return Reusable TokenStreamComponents for the field, or {@code null} if there were no
+     *     previous components for the field
      */
-    public abstract TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName);
+    public abstract TokenStreamComponents getReusableComponents(
+        Analyzer analyzer, String fieldName);
 
     /**
-     * Stores the given TokenStreamComponents as the reusable components for the
-     * field with the give name.
+     * Stores the given TokenStreamComponents as the reusable components for the field with the
+     * given name.
      *
      * @param fieldName Name of the field whose TokenStreamComponents are being set
      * @param components TokenStreamComponents which are to be reused for the field
      */
-    public abstract void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components);
+    public abstract void setReusableComponents(
+        Analyzer analyzer, String fieldName, TokenStreamComponents components);
 
     /**
      * Returns the currently stored value.
@@ -491,50 +474,52 @@ public abstract class Analyzer implements Closeable {
       }
       analyzer.storedValue.set(storedValue);
     }
-
   }
 
-  /**
-   * A predefined {@link ReuseStrategy}  that reuses the same components for
-   * every field.
-   */
-  public static final ReuseStrategy GLOBAL_REUSE_STRATEGY = new ReuseStrategy() {
+  /** A predefined {@link ReuseStrategy} that reuses the same components for every field. */
+  public static final ReuseStrategy GLOBAL_REUSE_STRATEGY =
+      new ReuseStrategy() {
 
-    @Override
-    public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
-      return (TokenStreamComponents) getStoredValue(analyzer);
-    }
+        @Override
+        public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
+          return (TokenStreamComponents) getStoredValue(analyzer);
+        }
 
-    @Override
-    public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) {
-      setStoredValue(analyzer, components);
-    }
-  };
+        @Override
+        public void setReusableComponents(
+            Analyzer analyzer, String fieldName, TokenStreamComponents components) {
+          setStoredValue(analyzer, components);
+        }
+      };
 
   /**
-   * A predefined {@link ReuseStrategy} that reuses components per-field by
-   * maintaining a Map of TokenStreamComponent per field name.
+   * A predefined {@link ReuseStrategy} that reuses components per-field by maintaining a Map of
+   * TokenStreamComponents per field name.
    */
-  public static final ReuseStrategy PER_FIELD_REUSE_STRATEGY = new ReuseStrategy() {
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
-      Map<String, TokenStreamComponents> componentsPerField = (Map<String, TokenStreamComponents>) getStoredValue(analyzer);
-      return componentsPerField != null ? componentsPerField.get(fieldName) : null;
-    }
+  public static final ReuseStrategy PER_FIELD_REUSE_STRATEGY =
+      new ReuseStrategy() {
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
+          Map<String, TokenStreamComponents> componentsPerField =
+              (Map<String, TokenStreamComponents>) getStoredValue(analyzer);
+          return componentsPerField != null ? componentsPerField.get(fieldName) : null;
+        }
 
-    @SuppressWarnings("unchecked")
-    @Override
-    public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) {
-      Map<String, TokenStreamComponents> componentsPerField = (Map<String, TokenStreamComponents>) getStoredValue(analyzer);
-      if (componentsPerField == null) {
-        componentsPerField = new HashMap<>();
-        setStoredValue(analyzer, componentsPerField);
-      }
-      componentsPerField.put(fieldName, components);
-    }
-  };
+        @SuppressWarnings("unchecked")
+        @Override
+        public void setReusableComponents(
+            Analyzer analyzer, String fieldName, TokenStreamComponents components) {
+          Map<String, TokenStreamComponents> componentsPerField =
+              (Map<String, TokenStreamComponents>) getStoredValue(analyzer);
+          if (componentsPerField == null) {
+            componentsPerField = new HashMap<>();
+            setStoredValue(analyzer, componentsPerField);
+          }
+          componentsPerField.put(fieldName, components);
+        }
+      };
 
   private static final class StringTokenStream extends TokenStream {
 
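
Because the Javadoc above keeps pointing at the TokenStream consumer workflow, a compact
end-to-end sketch of that workflow may help. WhitespaceTokenizer is again assumed from the
analyzers-common module; the field name and text are illustrative.

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class ConsumeDemo {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer =
            new Analyzer() {
              @Override
              protected TokenStreamComponents createComponents(String fieldName) {
                return new TokenStreamComponents(new WhitespaceTokenizer());
              }
            };
        // The required order: reset, incrementToken loop, end, close.
        try (TokenStream ts = analyzer.tokenStream("body", "reuse is per thread")) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            System.out.println(term.toString());
          }
          ts.end();
        }
        analyzer.close(); // releases the per-thread stored components
      }
    }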
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java b/lucene/core/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java
index 0e5297e..29cd6dc 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java
@@ -16,44 +16,40 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.Reader;
-
 import org.apache.lucene.util.AttributeFactory;
 
 /**
- * Extension to {@link Analyzer} suitable for Analyzers which wrap
- * other Analyzers.
- * 
- * <p>{@link #getWrappedAnalyzer(String)} allows the Analyzer
- * to wrap multiple Analyzers which are selected on a per field basis.
- * 
+ * Extension to {@link Analyzer} suitable for Analyzers which wrap other Analyzers.
+ *
+ * <p>{@link #getWrappedAnalyzer(String)} allows the Analyzer to wrap multiple Analyzers which are
+ * selected on a per field basis.
+ *
  * <p>{@link #wrapComponents(String, Analyzer.TokenStreamComponents)} allows the
- * TokenStreamComponents of the wrapped Analyzer to then be wrapped
- * (such as adding a new {@link TokenFilter} to form new TokenStreamComponents.
+ * TokenStreamComponents of the wrapped Analyzer to then be wrapped (such as adding a new {@link
+ * TokenFilter} to form new TokenStreamComponents).
  *
- * <p>{@link #wrapReader(String, Reader)} allows the Reader of the wrapped
- * Analyzer to then be wrapped (such as adding a new {@link CharFilter}.
+ * <p>{@link #wrapReader(String, Reader)} allows the Reader of the wrapped Analyzer to then be
+ * wrapped (such as adding a new {@link CharFilter}).
  *
- * <p><b>Important:</b> If you do not want to wrap the TokenStream
- * using {@link #wrapComponents(String, Analyzer.TokenStreamComponents)}
- * or the Reader using {@link #wrapReader(String, Reader)} and just delegate
- * to other analyzers (like by field name), use {@link DelegatingAnalyzerWrapper}
- * as superclass!
+ * <p><b>Important:</b> If you do not want to wrap the TokenStream using {@link
+ * #wrapComponents(String, Analyzer.TokenStreamComponents)} or the Reader using {@link
+ * #wrapReader(String, Reader)} and just delegate to other analyzers (like by field name), use
+ * {@link DelegatingAnalyzerWrapper} as superclass!
  *
  * @see DelegatingAnalyzerWrapper
- *
  * @since 4.0.0
  */
 public abstract class AnalyzerWrapper extends Analyzer {
 
   /**
    * Creates a new AnalyzerWrapper with the given reuse strategy.
-   * <p>If you want to wrap a single delegate Analyzer you can probably
-   * reuse its strategy when instantiating this subclass:
-   * {@code super(delegate.getReuseStrategy());}.
-   * <p>If you choose different analyzers per field, use
-   * {@link #PER_FIELD_REUSE_STRATEGY}.
+   *
+   * <p>If you want to wrap a single delegate Analyzer you can probably reuse its strategy when
+   * instantiating this subclass: {@code super(delegate.getReuseStrategy());}.
+   *
+   * <p>If you choose different analyzers per field, use {@link #PER_FIELD_REUSE_STRATEGY}.
+   *
    * @see #getReuseStrategy()
    */
   protected AnalyzerWrapper(ReuseStrategy reuseStrategy) {
@@ -61,40 +57,34 @@ public abstract class AnalyzerWrapper extends Analyzer {
   }
 
   /**
-   * Retrieves the wrapped Analyzer appropriate for analyzing the field with
-   * the given name
+   * Retrieves the wrapped Analyzer appropriate for analyzing the field with the given name.
    *
    * @param fieldName Name of the field which is to be analyzed
-   * @return Analyzer for the field with the given name.  Assumed to be non-null
+   * @return Analyzer for the field with the given name. Assumed to be non-null
    */
   protected abstract Analyzer getWrappedAnalyzer(String fieldName);
 
   /**
-   * Wraps / alters the given TokenStreamComponents, taken from the wrapped
-   * Analyzer, to form new components. It is through this method that new
-   * TokenFilters can be added by AnalyzerWrappers. By default, the given
-   * components are returned.
-   * 
-   * @param fieldName
-   *          Name of the field which is to be analyzed
-   * @param components
-   *          TokenStreamComponents taken from the wrapped Analyzer
+   * Wraps / alters the given TokenStreamComponents, taken from the wrapped Analyzer, to form new
+   * components. It is through this method that new TokenFilters can be added by AnalyzerWrappers.
+   * By default, the given components are returned.
+   *
+   * @param fieldName Name of the field which is to be analyzed
+   * @param components TokenStreamComponents taken from the wrapped Analyzer
    * @return Wrapped / altered TokenStreamComponents.
    */
-  protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+  protected TokenStreamComponents wrapComponents(
+      String fieldName, TokenStreamComponents components) {
     return components;
   }
 
   /**
-   * Wraps / alters the given TokenStream for normalization purposes, taken
-   * from the wrapped Analyzer, to form new components. It is through this
-   * method that new TokenFilters can be added by AnalyzerWrappers. By default,
-   * the given token stream are returned.
-   * 
-   * @param fieldName
-   *          Name of the field which is to be analyzed
-   * @param in
-   *          TokenStream taken from the wrapped Analyzer
+   * Wraps / alters the given TokenStream for normalization purposes, taken from the wrapped
+   * Analyzer, to form new components. It is through this method that new TokenFilters can be added
+   * by AnalyzerWrappers. By default, the given token stream is returned.
+   *
+   * @param fieldName Name of the field which is to be analyzed
+   * @param in TokenStream taken from the wrapped Analyzer
    * @return Wrapped / altered TokenStreamComponents.
    */
   protected TokenStream wrapTokenStreamForNormalization(String fieldName, TokenStream in) {
@@ -102,14 +92,11 @@ public abstract class AnalyzerWrapper extends Analyzer {
   }
 
   /**
-   * Wraps / alters the given Reader. Through this method AnalyzerWrappers can
-   * implement {@link #initReader(String, Reader)}. By default, the given reader
-   * is returned.
-   * 
-   * @param fieldName
-   *          name of the field which is to be analyzed
-   * @param reader
-   *          the reader to wrap
+   * Wraps / alters the given Reader. Through this method AnalyzerWrappers can implement {@link
+   * #initReader(String, Reader)}. By default, the given reader is returned.
+   *
+   * @param fieldName name of the field which is to be analyzed
+   * @param reader the reader to wrap
    * @return the wrapped reader
    */
   protected Reader wrapReader(String fieldName, Reader reader) {
@@ -117,14 +104,11 @@ public abstract class AnalyzerWrapper extends Analyzer {
   }
 
   /**
-   * Wraps / alters the given Reader. Through this method AnalyzerWrappers can
-   * implement {@link #initReaderForNormalization(String, Reader)}. By default,
-   * the given reader  is returned.
-   * 
-   * @param fieldName
-   *          name of the field which is to be analyzed
-   * @param reader
-   *          the reader to wrap
+   * Wraps / alters the given Reader. Through this method AnalyzerWrappers can implement {@link
+   * #initReaderForNormalization(String, Reader)}. By default, the given reader is returned.
+   *
+   * @param fieldName name of the field which is to be analyzed
+   * @param reader the reader to wrap
    * @return the wrapped reader
    */
   protected Reader wrapReaderForNormalization(String fieldName, Reader reader) {
@@ -138,7 +122,8 @@ public abstract class AnalyzerWrapper extends Analyzer {
 
   @Override
   protected final TokenStream normalize(String fieldName, TokenStream in) {
-    return wrapTokenStreamForNormalization(fieldName, getWrappedAnalyzer(fieldName).normalize(fieldName, in));
+    return wrapTokenStreamForNormalization(
+        fieldName, getWrappedAnalyzer(fieldName).normalize(fieldName, in));
   }
 
   @Override
@@ -158,7 +143,8 @@ public abstract class AnalyzerWrapper extends Analyzer {
 
   @Override
   protected final Reader initReaderForNormalization(String fieldName, Reader reader) {
-    return getWrappedAnalyzer(fieldName).initReaderForNormalization(fieldName, wrapReaderForNormalization(fieldName, reader));
+    return getWrappedAnalyzer(fieldName)
+        .initReaderForNormalization(fieldName, wrapReaderForNormalization(fieldName, reader));
   }
 
   @Override
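
To make the wrapComponents hook concrete, a small hypothetical subclass follows; it pushes a core
LowerCaseFilter on top of whatever the delegate builds. The wrapper class itself is illustrative
and not part of this commit.

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.AnalyzerWrapper;
    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.TokenStream;

    public final class LowerCasingWrapper extends AnalyzerWrapper {
      private final Analyzer delegate;

      public LowerCasingWrapper(Analyzer delegate) {
        // Single delegate, so its reuse strategy can be reused, as the Javadoc suggests.
        super(delegate.getReuseStrategy());
        this.delegate = delegate;
      }

      @Override
      protected Analyzer getWrappedAnalyzer(String fieldName) {
        return delegate;
      }

      @Override
      protected TokenStreamComponents wrapComponents(
          String fieldName, TokenStreamComponents components) {
        TokenStream sink = new LowerCaseFilter(components.getTokenStream());
        return new TokenStreamComponents(components.getSource(), sink);
      }
    }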
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
index 010f41f..f87ee88 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
@@ -16,45 +16,41 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-
 import org.apache.lucene.util.AttributeSource;
 
 /**
- * This class can be used if the token attributes of a TokenStream
- * are intended to be consumed more than once. It caches
- * all token attribute states locally in a List when the first call to
- * {@link #incrementToken()} is called. Subsequent calls will used the cache.
- * <p>
- * <em>Important:</em> Like any proper TokenFilter, {@link #reset()} propagates
- * to the input, although only before {@link #incrementToken()} is called the
- * first time. Prior to  Lucene 5, it was never propagated.
+ * This class can be used if the token attributes of a TokenStream are intended to be consumed more
+ * than once. It caches all token attribute states locally in a List when {@link
+ * #incrementToken()} is called for the first time. Subsequent calls will use the cache.
+ *
+ * <p><em>Important:</em> Like any proper TokenFilter, {@link #reset()} propagates to the input,
+ * although only before {@link #incrementToken()} is called the first time. Prior to Lucene 5, it
+ * was never propagated.
  */
 public final class CachingTokenFilter extends TokenFilter {
   private List<AttributeSource.State> cache = null;
-  private Iterator<AttributeSource.State> iterator = null; 
+  private Iterator<AttributeSource.State> iterator = null;
   private AttributeSource.State finalState;
-  
+
   /**
-   * Create a new CachingTokenFilter around <code>input</code>. As with
-   * any normal TokenFilter, do <em>not</em> call reset on the input; this filter
-   * will do it normally.
+   * Create a new CachingTokenFilter around <code>input</code>. As with any normal TokenFilter, do
+   * <em>not</em> call reset on the input; this filter will do it normally.
    */
   public CachingTokenFilter(TokenStream input) {
     super(input);
   }
 
   /**
-   * Propagates reset if incrementToken has not yet been called. Otherwise
-   * it rewinds the iterator to the beginning of the cached list.
+   * Propagates reset if incrementToken has not yet been called. Otherwise it rewinds the iterator
+   * to the beginning of the cached list.
    */
   @Override
   public void reset() throws IOException {
-    if (cache == null) {//first time
+    if (cache == null) { // first time
       input.reset();
     } else {
       iterator = cache.iterator();
@@ -64,13 +60,13 @@ public final class CachingTokenFilter extends TokenFilter {
   /** The first time called, it'll read and cache all tokens from the input. */
   @Override
   public final boolean incrementToken() throws IOException {
-    if (cache == null) {//first-time
+    if (cache == null) { // first-time
       // fill cache lazily
       cache = new ArrayList<>(64);
       fillCache();
       iterator = cache.iterator();
     }
-    
+
     if (!iterator.hasNext()) {
       // the cache is exhausted, return false
       return false;
@@ -100,5 +96,4 @@ public final class CachingTokenFilter extends TokenFilter {
   public boolean isCached() {
     return cache != null;
   }
-
 }
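
The two-pass contract documented above, as a short sketch. WhitespaceAnalyzer is assumed from the
analyzers-common module; the counts in the comments are the expected output.

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.CachingTokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

    public class CachingDemo {
      private static int countTokens(TokenStream ts) throws IOException {
        int count = 0;
        while (ts.incrementToken()) {
          count++;
        }
        return count;
      }

      public static void main(String[] args) throws IOException {
        try (Analyzer analyzer = new WhitespaceAnalyzer();
            CachingTokenFilter cached =
                new CachingTokenFilter(analyzer.tokenStream("f", "a b c"))) {
          cached.reset(); // propagates to the input: nothing is cached yet
          System.out.println(countTokens(cached)); // 3: input consumed, cache filled
          cached.reset(); // only rewinds the cache; the input is not read again
          System.out.println(countTokens(cached)); // 3: served from the cache
          cached.end();
        }
      }
    }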
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CharArrayMap.java b/lucene/core/src/java/org/apache/lucene/analysis/CharArrayMap.java
index ac88886..ea94f96 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CharArrayMap.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CharArrayMap.java
@@ -16,75 +16,73 @@
  */
 package org.apache.lucene.analysis;
 
-
-import java.util.Arrays;
 import java.util.AbstractMap;
 import java.util.AbstractSet;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
 /**
- * A simple class that stores key Strings as char[]'s in a
- * hash table. Note that this is not a general purpose
- * class.  For example, it cannot remove items from the
- * map, nor does it resize its hash table to be smaller,
- * etc.  It is designed to be quick to retrieve items
- * by char[] keys without the necessity of converting
- * to a String first.
+ * A simple class that stores key Strings as char[]'s in a hash table. Note that this is not a
+ * general purpose class. For example, it cannot remove items from the map, nor does it resize its
+ * hash table to be smaller, etc. It is designed to be quick to retrieve items by char[] keys
+ * without the necessity of converting to a String first.
  */
-public class CharArrayMap<V> extends AbstractMap<Object,V> {
+public class CharArrayMap<V> extends AbstractMap<Object, V> {
   // private only because missing generics
   private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<>();
 
-  private final static int INIT_SIZE = 8;
-  private boolean ignoreCase;  
+  private static final int INIT_SIZE = 8;
+  private boolean ignoreCase;
   private int count;
-  char[][] keys; // package private because used in CharArraySet's non Set-conform CharArraySetIterator
-  V[] values; // package private because used in CharArraySet's non Set-conform CharArraySetIterator
+  // package private because used in CharArraySet's non Set-conform CharArraySetIterator
+  char[][] keys;
+  // package private because used in CharArraySet's non Set-conform CharArraySetIterator
+  V[] values;
 
   /**
    * Create map with enough capacity to hold startSize terms
    *
-   * @param startSize
-   *          the initial capacity
-   * @param ignoreCase
-   *          <code>false</code> if and only if the set should be case sensitive
-   *          otherwise <code>true</code>.
+   * @param startSize the initial capacity
+   * @param ignoreCase <code>false</code> if and only if the map should be case sensitive,
+   *     otherwise <code>true</code>.
    */
   @SuppressWarnings("unchecked")
   public CharArrayMap(int startSize, boolean ignoreCase) {
     this.ignoreCase = ignoreCase;
     int size = INIT_SIZE;
-    while(startSize + (startSize>>2) > size)
+    while (startSize + (startSize >> 2) > size) {
       size <<= 1;
+    }
     keys = new char[size][];
     values = (V[]) new Object[size];
   }
 
   /**
-   * Creates a map from the mappings in another map. 
+   * Creates a map from the mappings in another map.
    *
-   * @param c
-   *          a map whose mappings to be copied
-   * @param ignoreCase
-   *          <code>false</code> if and only if the set should be case sensitive
-   *          otherwise <code>true</code>.
+   * @param c a map whose mappings are to be copied
+   * @param ignoreCase <code>false</code> if and only if the map should be case sensitive,
+   *     otherwise <code>true</code>.
    */
-  public CharArrayMap(Map<?,? extends V> c, boolean ignoreCase) {
+  public CharArrayMap(Map<?, ? extends V> c, boolean ignoreCase) {
     this(c.size(), ignoreCase);
     putAll(c);
   }
-  
+
   /** Create set from the supplied map (used internally for readonly maps...) */
-  private CharArrayMap(CharArrayMap<V> toCopy){
+  private CharArrayMap(CharArrayMap<V> toCopy) {
     this.keys = toCopy.keys;
     this.values = toCopy.values;
     this.ignoreCase = toCopy.ignoreCase;
     this.count = toCopy.count;
   }
-  
-  /** Clears all entries in this map. This method is supported for reusing, but not {@link Map#remove}. */
+
+  /**
+   * Clears all entries in this map. This method is supported for reuse, but {@link Map#remove}
+   * is not.
+   */
   @Override
   public void clear() {
     count = 0;
@@ -92,8 +90,10 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     Arrays.fill(values, null);
   }
 
-  /** true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
-   * are in the {@link #keySet()} */
+  /**
+   * true if the <code>len</code> chars of <code>text</code> starting at <code>off</code> are in the
+   * {@link #keySet()}
+   */
   public boolean containsKey(char[] text, int off, int len) {
     return keys[getSlot(text, off, len)] != null;
   }
@@ -106,14 +106,16 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
   @Override
   public boolean containsKey(Object o) {
     if (o instanceof char[]) {
-      final char[] text = (char[])o;
+      final char[] text = (char[]) o;
       return containsKey(text, 0, text.length);
-    } 
+    }
     return containsKey(o.toString());
   }
 
-  /** returns the value of the mapping of <code>len</code> chars of <code>text</code>
-   * starting at <code>off</code> */
+  /**
+   * returns the value of the mapping of <code>len</code> chars of <code>text</code> starting at
+   * <code>off</code>
+   */
   public V get(char[] text, int off, int len) {
     return values[getSlot(text, off, len)];
   }
@@ -126,37 +128,37 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
   @Override
   public V get(Object o) {
     if (o instanceof char[]) {
-      final char[] text = (char[])o;
+      final char[] text = (char[]) o;
       return get(text, 0, text.length);
-    } 
+    }
     return get(o.toString());
   }
 
   private int getSlot(char[] text, int off, int len) {
     int code = getHashCode(text, off, len);
-    int pos = code & (keys.length-1);
+    int pos = code & (keys.length - 1);
     char[] text2 = keys[pos];
     if (text2 != null && !equals(text, off, len, text2)) {
-      final int inc = ((code>>8)+code)|1;
+      final int inc = ((code >> 8) + code) | 1;
       do {
         code += inc;
-        pos = code & (keys.length-1);
+        pos = code & (keys.length - 1);
         text2 = keys[pos];
       } while (text2 != null && !equals(text, off, len, text2));
     }
     return pos;
   }
 
-  /** Returns true if the String is in the set */  
+  /** Returns the hash slot for the given String key. */
   private int getSlot(CharSequence text) {
     int code = getHashCode(text);
-    int pos = code & (keys.length-1);
+    int pos = code & (keys.length - 1);
     char[] text2 = keys[pos];
     if (text2 != null && !equals(text, text2)) {
-      final int inc = ((code>>8)+code)|1;
+      final int inc = ((code >> 8) + code) | 1;
       do {
         code += inc;
-        pos = code & (keys.length-1);
+        pos = code & (keys.length - 1);
         text2 = keys[pos];
       } while (text2 != null && !equals(text, text2));
     }
@@ -171,19 +173,19 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
   @Override
   public V put(Object o, V value) {
     if (o instanceof char[]) {
-      return put((char[])o, value);
+      return put((char[]) o, value);
     }
     return put(o.toString(), value);
   }
-  
+
   /** Add the given mapping. */
   public V put(String text, V value) {
     return put(text.toCharArray(), value);
   }
 
-  /** Add the given mapping.
-   * If ignoreCase is true for this Set, the text array will be directly modified.
-   * The user should never modify this text array after calling this method.
+  /**
+   * Add the given mapping. If ignoreCase is true for this map, the text array will be directly
+   * modified. The user should never modify this text array after calling this method.
    */
   public V put(char[] text, V value) {
     if (ignoreCase) {
@@ -199,7 +201,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     values[slot] = value;
     count++;
 
-    if (count + (count>>2) > keys.length) {
+    if (count + (count >> 2) > keys.length) {
       rehash();
     }
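
As a usage note for the char[]-based accessors being reformatted here, a tiny sketch (the map
contents are illustrative; every call shown appears in this file):

    import org.apache.lucene.analysis.CharArrayMap;

    public class CharArrayMapDemo {
      public static void main(String[] args) {
        // ignoreCase = true: keys match case-insensitively
        CharArrayMap<Integer> map = new CharArrayMap<>(8, true);
        map.put("foo", 1);

        // Look up a slice of an existing buffer without allocating a String:
        char[] buffer = "xxFOOxx".toCharArray();
        System.out.println(map.containsKey(buffer, 2, 3)); // true
        System.out.println(map.get(buffer, 2, 3)); // 1
      }
    }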
 
@@ -209,38 +211,36 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
   @SuppressWarnings("unchecked")
   private void rehash() {
     assert keys.length == values.length;
-    final int newSize = 2*keys.length;
+    final int newSize = 2 * keys.length;
     final char[][] oldkeys = keys;
     final V[] oldvalues = values;
     keys = new char[newSize][];
     values = (V[]) new Object[newSize];
 
-    for(int i=0; i<oldkeys.length; i++) {
+    for (int i = 0; i < oldkeys.length; i++) {
       char[] text = oldkeys[i];
       if (text != null) {
         // todo: could be faster... no need to compare strings on collision
-        final int slot = getSlot(text,0,text.length);
+        final int slot = getSlot(text, 0, text.length);
         keys[slot] = text;
         values[slot] = oldvalues[i];
       }
     }
   }
-  
+
   private boolean equals(char[] text1, int off, int len, char[] text2) {
-    if (len != text2.length)
-      return false;
-    final int limit = off+len;
+    if (len != text2.length) return false;
+    final int limit = off + len;
     if (ignoreCase) {
-      for(int i=0;i<len;) {
-        final int codePointAt = Character.codePointAt(text1, off+i, limit);
+      for (int i = 0; i < len; ) {
+        final int codePointAt = Character.codePointAt(text1, off + i, limit);
         if (Character.toLowerCase(codePointAt) != Character.codePointAt(text2, i, text2.length))
           return false;
-        i += Character.charCount(codePointAt); 
+        i += Character.charCount(codePointAt);
       }
     } else {
-      for(int i=0;i<len;i++) {
-        if (text1[off+i] != text2[i])
-          return false;
+      for (int i = 0; i < len; i++) {
+        if (text1[off + i] != text2[i]) return false;
       }
     }
     return true;
@@ -248,57 +248,53 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
 
   private boolean equals(CharSequence text1, char[] text2) {
     int len = text1.length();
-    if (len != text2.length)
-      return false;
+    if (len != text2.length) return false;
     if (ignoreCase) {
-      for(int i=0;i<len;) {
+      for (int i = 0; i < len; ) {
         final int codePointAt = Character.codePointAt(text1, i);
         if (Character.toLowerCase(codePointAt) != Character.codePointAt(text2, i, text2.length))
           return false;
         i += Character.charCount(codePointAt);
       }
     } else {
-      for(int i=0;i<len;i++) {
-        if (text1.charAt(i) != text2[i])
-          return false;
+      for (int i = 0; i < len; i++) {
+        if (text1.charAt(i) != text2[i]) return false;
       }
     }
     return true;
   }
-  
+
   private int getHashCode(char[] text, int offset, int len) {
-    if (text == null)
-      throw new NullPointerException();
+    if (text == null) throw new NullPointerException();
     int code = 0;
     final int stop = offset + len;
     if (ignoreCase) {
-      for (int i=offset; i<stop;) {
+      for (int i = offset; i < stop; ) {
         final int codePointAt = Character.codePointAt(text, i, stop);
-        code = code*31 + Character.toLowerCase(codePointAt);
+        code = code * 31 + Character.toLowerCase(codePointAt);
         i += Character.charCount(codePointAt);
       }
     } else {
-      for (int i=offset; i<stop; i++) {
-        code = code*31 + text[i];
+      for (int i = offset; i < stop; i++) {
+        code = code * 31 + text[i];
       }
     }
     return code;
   }
 
   private int getHashCode(CharSequence text) {
-    if (text == null)
-      throw new NullPointerException();
+    if (text == null) throw new NullPointerException();
     int code = 0;
     int len = text.length();
     if (ignoreCase) {
-      for (int i=0; i<len;) {
+      for (int i = 0; i < len; ) {
         int codePointAt = Character.codePointAt(text, i);
-        code = code*31 + Character.toLowerCase(codePointAt);
+        code = code * 31 + Character.toLowerCase(codePointAt);
         i += Character.charCount(codePointAt);
       }
     } else {
-      for (int i=0; i<len; i++) {
-        code = code*31 + text.charAt(i);
+      for (int i = 0; i < len; i++) {
+        code = code * 31 + text.charAt(i);
       }
     }
     return code;
@@ -317,8 +313,8 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
   @Override
   public String toString() {
     final StringBuilder sb = new StringBuilder("{");
-    for (Map.Entry<Object,V> entry : entrySet()) {
-      if (sb.length()>1) sb.append(", ");
+    for (Map.Entry<Object, V> entry : entrySet()) {
+      if (sb.length() > 1) sb.append(", ");
       sb.append(entry);
     }
     return sb.append('}').toString();
@@ -326,11 +322,11 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
 
   private EntrySet entrySet = null;
   private CharArraySet keySet = null;
-  
+
   EntrySet createEntrySet() {
     return new EntrySet(true);
   }
-  
+
   @Override
   public final EntrySet entrySet() {
     if (entrySet == null) {
@@ -338,46 +334,53 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     }
     return entrySet;
   }
-  
+
   // helper for CharArraySet to not produce endless recursion
   final Set<Object> originalKeySet() {
     return super.keySet();
   }
 
-  /** Returns an {@link CharArraySet} view on the map's keys.
-   * The set will use the same {@code matchVersion} as this map. */
-  @Override @SuppressWarnings({"unchecked","rawtypes"})
+  /**
+   * Returns a {@link CharArraySet} view of the map's keys. The set will use the same {@code
+   * matchVersion} as this map.
+   */
+  @Override
+  @SuppressWarnings({"unchecked", "rawtypes"})
   public final CharArraySet keySet() {
     if (keySet == null) {
       // prevent adding of entries
-      keySet = new CharArraySet((CharArrayMap) this) {
-        @Override
-        public boolean add(Object o) {
-          throw new UnsupportedOperationException();
-        }
-        @Override
-        public boolean add(CharSequence text) {
-          throw new UnsupportedOperationException();
-        }
-        @Override
-        public boolean add(String text) {
-          throw new UnsupportedOperationException();
-        }
-        @Override
-        public boolean add(char[] text) {
-          throw new UnsupportedOperationException();
-        }
-      };
+      keySet =
+          new CharArraySet((CharArrayMap) this) {
+            @Override
+            public boolean add(Object o) {
+              throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean add(CharSequence text) {
+              throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean add(String text) {
+              throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public boolean add(char[] text) {
+              throw new UnsupportedOperationException();
+            }
+          };
     }
     return keySet;
   }
 
   /** public iterator class so efficient methods are exposed to users */
-  public class EntryIterator implements Iterator<Map.Entry<Object,V>> {
-    private int pos=-1;
+  public class EntryIterator implements Iterator<Map.Entry<Object, V>> {
+    private int pos = -1;
     private int lastPos;
     private final boolean allowModify;
-    
+
     private EntryIterator(boolean allowModify) {
       this.allowModify = allowModify;
       goNext();
@@ -410,18 +413,17 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
       return values[lastPos];
     }
 
-    /** sets the value associated with the last key returned */    
+    /** sets the value associated with the last key returned */
     public V setValue(V value) {
-      if (!allowModify)
-        throw new UnsupportedOperationException();
+      if (!allowModify) throw new UnsupportedOperationException();
       V old = values[lastPos];
       values[lastPos] = value;
-      return old;      
+      return old;
     }
 
     /** use nextCharArray() + currentValue() for better efficiency. */
     @Override
-    public Map.Entry<Object,V> next() {
+    public Map.Entry<Object, V> next() {
       goNext();
       return new MapEntry(lastPos, allowModify);
     }
@@ -432,7 +434,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     }
   }
 
-  private final class MapEntry implements Map.Entry<Object,V> {
+  private final class MapEntry implements Map.Entry<Object, V> {
     private final int pos;
     private final boolean allowModify;
 
@@ -455,8 +457,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
 
     @Override
     public V setValue(V value) {
-      if (!allowModify)
-        throw new UnsupportedOperationException();
+      if (!allowModify) throw new UnsupportedOperationException();
       final V old = values[pos];
       values[pos] = value;
       return old;
@@ -464,91 +465,84 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
 
     @Override
     public String toString() {
-      return new StringBuilder().append(keys[pos]).append('=')
-        .append((values[pos] == CharArrayMap.this) ? "(this Map)" : values[pos])
-        .toString();
+      return new StringBuilder()
+          .append(keys[pos])
+          .append('=')
+          .append((values[pos] == CharArrayMap.this) ? "(this Map)" : values[pos])
+          .toString();
     }
   }
 
   /** public EntrySet class so efficient methods are exposed to users */
-  public final class EntrySet extends AbstractSet<Map.Entry<Object,V>> {
+  public final class EntrySet extends AbstractSet<Map.Entry<Object, V>> {
     private final boolean allowModify;
-    
+
     private EntrySet(boolean allowModify) {
       this.allowModify = allowModify;
     }
-  
+
     @Override
     public EntryIterator iterator() {
       return new EntryIterator(allowModify);
     }
-    
+
     @Override
     @SuppressWarnings("unchecked")
     public boolean contains(Object o) {
-      if (!(o instanceof Map.Entry))
-        return false;
-      final Map.Entry<Object,V> e = (Map.Entry<Object,V>)o;
+      if (!(o instanceof Map.Entry)) return false;
+      final Map.Entry<Object, V> e = (Map.Entry<Object, V>) o;
       final Object key = e.getKey();
       final Object val = e.getValue();
       final Object v = get(key);
       return v == null ? val == null : v.equals(val);
     }
-    
+
     @Override
     public boolean remove(Object o) {
       throw new UnsupportedOperationException();
     }
-    
+
     @Override
     public int size() {
       return count;
     }
-    
+
     @Override
     public void clear() {
-      if (!allowModify)
-        throw new UnsupportedOperationException();
+      if (!allowModify) throw new UnsupportedOperationException();
       CharArrayMap.this.clear();
     }
   }
-  
+
   /**
-   * Returns an unmodifiable {@link CharArrayMap}. This allows to provide
-   * unmodifiable views of internal map for "read-only" use.
-   * 
-   * @param map
-   *          a map for which the unmodifiable map is returned.
+   * Returns an unmodifiable {@link CharArrayMap}. This allows providing unmodifiable views of an
+   * internal map for "read-only" use.
+   *
+   * @param map a map for which the unmodifiable map is returned.
   * @return a new unmodifiable {@link CharArrayMap}.
-   * @throws NullPointerException
-   *           if the given map is <code>null</code>.
+   * @throws NullPointerException if the given map is <code>null</code>.
    */
   @SuppressWarnings("ReferenceEquality")
   public static <V> CharArrayMap<V> unmodifiableMap(CharArrayMap<V> map) {
-    if (map == null)
-      throw new NullPointerException("Given map is null");
-    if (map == emptyMap() || map.isEmpty())
-      return emptyMap();
-    if (map instanceof UnmodifiableCharArrayMap)
-      return map;
+    if (map == null) throw new NullPointerException("Given map is null");
+    if (map == emptyMap() || map.isEmpty()) return emptyMap();
+    if (map instanceof UnmodifiableCharArrayMap) return map;
     return new UnmodifiableCharArrayMap<>(map);
   }
 
   /**
-   * Returns a copy of the given map as a {@link CharArrayMap}. If the given map
-   * is a {@link CharArrayMap} the ignoreCase property will be preserved.
-   * 
-   * @param map
-   *          a map to copy
-   * @return a copy of the given map as a {@link CharArrayMap}. If the given map
-   *         is a {@link CharArrayMap} the ignoreCase property as well as the
-   *         matchVersion will be of the given map will be preserved.
+   * Returns a copy of the given map as a {@link CharArrayMap}. If the given map is a {@link
+   * CharArrayMap} the ignoreCase property will be preserved.
+   *
+   * @param map a map to copy
+   * @return a copy of the given map as a {@link CharArrayMap}. If the given map is a {@link
+   *     CharArrayMap}, the ignoreCase property as well as the matchVersion of the given map will
+   *     be preserved.
    */
   @SuppressWarnings("unchecked")
-  public static <V> CharArrayMap<V> copy(final Map<?,? extends V> map) {
-    if(map == EMPTY_MAP)
-      return emptyMap();
-    if(map instanceof CharArrayMap) {
+  public static <V> CharArrayMap<V> copy(final Map<?, ? extends V> map) {
+    if (map == EMPTY_MAP) return emptyMap();
+    if (map instanceof CharArrayMap) {
       CharArrayMap<V> m = (CharArrayMap<V>) map;
       // use fast path instead of iterating all values
       // this is even on very small sets ~10 times faster than iterating
@@ -563,13 +557,13 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     }
     return new CharArrayMap<>(map, false);
   }
-  
+
   /** Returns an empty, unmodifiable map. */
   @SuppressWarnings("unchecked")
   public static <V> CharArrayMap<V> emptyMap() {
     return (CharArrayMap<V>) EMPTY_MAP;
   }
-  
+
   // package private CharArraySet instanceof check in CharArraySet
   static class UnmodifiableCharArrayMap<V> extends CharArrayMap<V> {
 
@@ -583,10 +577,10 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     }
 
     @Override
-    public V put(Object o, V val){
+    public V put(Object o, V val) {
       throw new UnsupportedOperationException();
     }
-    
+
     @Override
     public V put(char[] text, V val) {
       throw new UnsupportedOperationException();
@@ -601,67 +595,60 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
     public V put(String text, V val) {
       throw new UnsupportedOperationException();
     }
-    
+
     @Override
     public V remove(Object key) {
       throw new UnsupportedOperationException();
     }
-  
+
     @Override
     EntrySet createEntrySet() {
       return new EntrySet(false);
     }
   }
-  
+
   /**
-   * Empty {@link org.apache.lucene.analysis.CharArrayMap.UnmodifiableCharArrayMap} optimized for speed.
-   * Contains checks will always return <code>false</code> or throw
-   * NPE if necessary.
+   * Empty {@link org.apache.lucene.analysis.CharArrayMap.UnmodifiableCharArrayMap} optimized for
+   * speed. Contains checks will always return <code>false</code> or throw NPE if necessary.
    */
   private static final class EmptyCharArrayMap<V> extends UnmodifiableCharArrayMap<V> {
     EmptyCharArrayMap() {
       super(new CharArrayMap<V>(0, false));
     }
-    
+
     @Override
     public boolean containsKey(char[] text, int off, int len) {
-      if(text == null)
-        throw new NullPointerException();
+      if (text == null) throw new NullPointerException();
       return false;
     }
 
     @Override
     public boolean containsKey(CharSequence cs) {
-      if(cs == null)
-        throw new NullPointerException();
+      if (cs == null) throw new NullPointerException();
       return false;
     }
 
     @Override
     public boolean containsKey(Object o) {
-      if(o == null)
-        throw new NullPointerException();
+      if (o == null) throw new NullPointerException();
       return false;
     }
-    
+
     @Override
     public V get(char[] text, int off, int len) {
-      if(text == null)
-        throw new NullPointerException();
+      if (text == null) throw new NullPointerException();
       return null;
     }
 
     @Override
     public V get(CharSequence cs) {
-      if(cs == null)
-        throw new NullPointerException();
+      if (cs == null) throw new NullPointerException();
       return null;
     }
 
     @Override
     public V get(Object o) {
-      if(o == null)
-        throw new NullPointerException();
+      if (o == null) throw new NullPointerException();
       return null;
     }
   }
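
A minimal usage sketch of the CharArrayMap API shown above; the demo class and
values are illustrative only and not part of the patch:

    import org.apache.lucene.analysis.CharArrayMap;

    public class CharArrayMapDemo {
      public static void main(String[] args) {
        // ignoreCase = true: lookups match regardless of case
        CharArrayMap<Integer> map = new CharArrayMap<>(16, true);
        map.put("Lucene", 1);

        // char[]-based lookup avoids allocating a String per probe
        char[] term = "lucene".toCharArray();
        System.out.println(map.get(term, 0, term.length)); // 1

        // read-only view; put()/remove() on it throw UnsupportedOperationException
        CharArrayMap<Integer> frozen = CharArrayMap.unmodifiableMap(map);
        System.out.println(frozen.containsKey("LUCENE")); // true
      }
    }
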
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CharArraySet.java b/lucene/core/src/java/org/apache/lucene/analysis/CharArraySet.java
index 4c8066a..7b3bced 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CharArraySet.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CharArraySet.java
@@ -16,79 +16,75 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.util.AbstractSet;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.Set;
 
 /**
- * A simple class that stores Strings as char[]'s in a
- * hash table.  Note that this is not a general purpose
- * class.  For example, it cannot remove items from the
- * set, nor does it resize its hash table to be smaller,
- * etc.  It is designed to be quick to test if a char[]
- * is in the set without the necessity of converting it
- * to a String first.
+ * A simple class that stores Strings as char[]'s in a hash table. Note that this is not a general
+ * purpose class. For example, it cannot remove items from the set, nor does it resize its hash
+ * table to be smaller, etc. It is designed to be quick to test if a char[] is in the set without
+ * the necessity of converting it to a String first.
  *
- * <P>
- * <em>Please note:</em> This class implements {@link java.util.Set Set} but
- * does not behave like it should in all cases. The generic type is
- * {@code Set<Object>}, because you can add any object to it,
- * that has a string representation. The add methods will use
- * {@link Object#toString} and store the result using a {@code char[]}
- * buffer. The same behavior have the {@code contains()} methods.
- * The {@link #iterator()} returns an {@code Iterator<char[]>}.
+ * <p><em>Please note:</em> This class implements {@link java.util.Set Set} but does not behave like
+ * it should in all cases. The generic type is {@code Set<Object>}, because you can add any object
+ * that has a string representation. The add methods will use {@link Object#toString} and store the
+ * result using a {@code char[]} buffer. The {@code contains()} methods behave the same way. The
+ * {@link #iterator()} returns an {@code Iterator<char[]>}.
  */
 public class CharArraySet extends AbstractSet<Object> {
 
   /** An empty {@code CharArraySet}. */
   public static final CharArraySet EMPTY_SET = new CharArraySet(CharArrayMap.<Object>emptyMap());
-  
+
   private static final Object PLACEHOLDER = new Object();
-  
+
   private final CharArrayMap<Object> map;
-  
+
   /**
    * Create set with enough capacity to hold startSize terms
-   * 
-   * @param startSize
-   *          the initial capacity
-   * @param ignoreCase
-   *          <code>false</code> if and only if the set should be case sensitive
-   *          otherwise <code>true</code>.
+   *
+   * @param startSize the initial capacity
+   * @param ignoreCase <code>false</code> if and only if the set should be case sensitive,
+   *     otherwise <code>true</code>.
    */
   public CharArraySet(int startSize, boolean ignoreCase) {
     this(new CharArrayMap<>(startSize, ignoreCase));
   }
 
   /**
-   * Creates a set from a Collection of objects. 
-   * 
-   * @param c
-   *          a collection whose elements to be placed into the set
-   * @param ignoreCase
-   *          <code>false</code> if and only if the set should be case sensitive
-   *          otherwise <code>true</code>.
+   * Creates a set from a Collection of objects.
+   *
+   * @param c a collection whose elements are to be placed into the set
+   * @param ignoreCase <code>false</code> if and only if the set should be case sensitive,
+   *     otherwise <code>true</code>.
    */
   public CharArraySet(Collection<?> c, boolean ignoreCase) {
     this(c.size(), ignoreCase);
     addAll(c);
   }
 
-  /** Create set from the specified map (internal only), used also by {@link CharArrayMap#keySet()} */
-  CharArraySet(final CharArrayMap<Object> map){
+  /**
+   * Create set from the specified map (internal only), used also by {@link CharArrayMap#keySet()}
+   */
+  CharArraySet(final CharArrayMap<Object> map) {
     this.map = map;
   }
-  
-  /** Clears all entries in this set. This method is supported for reusing, but not {@link Set#remove}. */
+
+  /**
+   * Clears all entries in this set. This method is supported for reuse, but not {@link
+   * Set#remove}.
+   */
   @Override
   public void clear() {
     map.clear();
   }
 
-  /** true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
-   * are in the set */
+  /**
+   * true if the <code>len</code> chars of <code>text</code> starting at <code>off</code> are in the
+   * set
+   */
   public boolean contains(char[] text, int off, int len) {
     return map.containsKey(text, off, len);
   }
@@ -112,15 +108,15 @@ public class CharArraySet extends AbstractSet<Object> {
   public boolean add(CharSequence text) {
     return map.put(text, PLACEHOLDER) == null;
   }
-  
+
   /** Add this String into the set */
   public boolean add(String text) {
     return map.put(text, PLACEHOLDER) == null;
   }
 
-  /** Add this char[] directly to the set.
-   * If ignoreCase is true for this Set, the text array will be directly modified.
-   * The user should never modify this text array after calling this method.
+  /**
+   * Add this char[] directly to the set. If ignoreCase is true for this Set, the text array will be
+   * directly modified. The user should never modify this text array after calling this method.
    */
   public boolean add(char[] text) {
     return map.put(text, PLACEHOLDER) == null;
@@ -130,61 +126,53 @@ public class CharArraySet extends AbstractSet<Object> {
   public int size() {
     return map.size();
   }
-  
+
   /**
-   * Returns an unmodifiable {@link CharArraySet}. This allows to provide
-   * unmodifiable views of internal sets for "read-only" use.
-   * 
-   * @param set
-   *          a set for which the unmodifiable set is returned.
+   * Returns an unmodifiable {@link CharArraySet}. This allows providing unmodifiable views of
+   * internal sets for "read-only" use.
+   *
+   * @param set a set for which the unmodifiable set is returned.
   * @return a new unmodifiable {@link CharArraySet}.
-   * @throws NullPointerException
-   *           if the given set is <code>null</code>.
+   * @throws NullPointerException if the given set is <code>null</code>.
    */
   public static CharArraySet unmodifiableSet(CharArraySet set) {
-    if (set == null)
-      throw new NullPointerException("Given set is null");
-    if (set == EMPTY_SET)
-      return EMPTY_SET;
-    if (set.map instanceof CharArrayMap.UnmodifiableCharArrayMap)
-      return set;
+    if (set == null) throw new NullPointerException("Given set is null");
+    if (set == EMPTY_SET) return EMPTY_SET;
+    if (set.map instanceof CharArrayMap.UnmodifiableCharArrayMap) return set;
     return new CharArraySet(CharArrayMap.unmodifiableMap(set.map));
   }
 
   /**
-   * Returns a copy of the given set as a {@link CharArraySet}. If the given set
-   * is a {@link CharArraySet} the ignoreCase property will be preserved.
-   * 
-   * @param set
-   *          a set to copy
-   * @return a copy of the given set as a {@link CharArraySet}. If the given set
-   *         is a {@link CharArraySet} the ignoreCase property as well as the
-   *         matchVersion will be of the given set will be preserved.
+   * Returns a copy of the given set as a {@link CharArraySet}. If the given set is a {@link
+   * CharArraySet} the ignoreCase property will be preserved.
+   *
+   * @param set a set to copy
+   * @return a copy of the given set as a {@link CharArraySet}. If the given set is a {@link
+   *     CharArraySet}, the ignoreCase property as well as the matchVersion of the given set will
+   *     be preserved.
    */
   public static CharArraySet copy(final Set<?> set) {
-    if(set == EMPTY_SET)
-      return EMPTY_SET;
-    if(set instanceof CharArraySet) {
+    if (set == EMPTY_SET) return EMPTY_SET;
+    if (set instanceof CharArraySet) {
       final CharArraySet source = (CharArraySet) set;
       return new CharArraySet(CharArrayMap.copy(source.map));
     }
     return new CharArraySet(set, false);
   }
-  
-  /**
-   * Returns an {@link Iterator} for {@code char[]} instances in this set.
-   */
-  @Override @SuppressWarnings("unchecked")
+
+  /** Returns an {@link Iterator} for {@code char[]} instances in this set. */
+  @Override
+  @SuppressWarnings("unchecked")
   public Iterator<Object> iterator() {
     // use the AbstractSet#keySet()'s iterator (to not produce endless recursion)
     return map.originalKeySet().iterator();
   }
-  
+
   @Override
   public String toString() {
     final StringBuilder sb = new StringBuilder("[");
     for (Object item : this) {
-      if (sb.length()>1) sb.append(", ");
+      if (sb.length() > 1) sb.append(", ");
       if (item instanceof char[]) {
         sb.append((char[]) item);
       } else {
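
A minimal usage sketch of the CharArraySet API shown above; the demo class and
values are illustrative only and not part of the patch:

    import org.apache.lucene.analysis.CharArraySet;

    public class CharArraySetDemo {
      public static void main(String[] args) {
        CharArraySet set = new CharArraySet(8, true); // ignoreCase = true
        set.add("Foo");

        // char[]-based membership test, no String conversion needed
        char[] term = "FOO".toCharArray();
        System.out.println(set.contains(term, 0, term.length)); // true

        // read-only view; add() on it throws UnsupportedOperationException
        CharArraySet frozen = CharArraySet.unmodifiableSet(set);
        System.out.println(frozen.size()); // 1
      }
    }
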
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CharFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/CharFilter.java
index 0a3fcae..4f43ce4 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CharFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CharFilter.java
@@ -16,49 +16,44 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 import java.io.Reader;
 
 /**
- * Subclasses of CharFilter can be chained to filter a Reader
- * They can be used as {@link java.io.Reader} with additional offset
- * correction. {@link Tokenizer}s will automatically use {@link #correctOffset}
- * if a CharFilter subclass is used.
- * <p>
- * This class is abstract: at a minimum you must implement {@link #read(char[], int, int)},
- * transforming the input in some way from {@link #input}, and {@link #correct(int)}
- * to adjust the offsets to match the originals.
- * <p>
- * You can optionally provide more efficient implementations of additional methods 
- * like {@link #read()}, {@link #read(char[])}, {@link #read(java.nio.CharBuffer)},
- * but this is not required.
- * <p>
- * For examples and integration with {@link Analyzer}, see the 
- * {@link org.apache.lucene.analysis Analysis package documentation}.
+ * Subclasses of CharFilter can be chained to filter a Reader. They can be used as {@link
+ * java.io.Reader} with additional offset correction. {@link Tokenizer}s will automatically use
+ * {@link #correctOffset} if a CharFilter subclass is used.
+ *
+ * <p>This class is abstract: at a minimum you must implement {@link #read(char[], int, int)},
+ * transforming the input in some way from {@link #input}, and {@link #correct(int)} to adjust the
+ * offsets to match the originals.
+ *
+ * <p>You can optionally provide more efficient implementations of additional methods like {@link
+ * #read()}, {@link #read(char[])}, {@link #read(java.nio.CharBuffer)}, but this is not required.
+ *
+ * <p>For examples and integration with {@link Analyzer}, see the {@link org.apache.lucene.analysis
+ * Analysis package documentation}.
  */
 // the way java.io.FilterReader should work!
 public abstract class CharFilter extends Reader {
-  /** 
-   * The underlying character-input stream. 
-   */
+  /** The underlying character-input stream. */
   protected final Reader input;
 
   /**
    * Create a new CharFilter wrapping the provided reader.
+   *
    * @param input a Reader, can also be a CharFilter for chaining.
    */
   public CharFilter(Reader input) {
     super(input);
     this.input = input;
   }
-  
-  /** 
+
+  /**
    * Closes the underlying input stream.
-   * <p>
-   * <b>NOTE:</b> 
-   * The default implementation closes the input Reader, so
-   * be sure to call <code>super.close()</code> when overriding this method.
+   *
+   * <p><b>NOTE:</b> The default implementation closes the input Reader, so be sure to call <code>
+   * super.close()</code> when overriding this method.
    */
   @Override
   public void close() throws IOException {
@@ -72,13 +67,12 @@ public abstract class CharFilter extends Reader {
    * @return corrected offset
    */
   protected abstract int correct(int currentOff);
-  
-  /**
-   * Chains the corrected offset through the input
-   * CharFilter(s).
-   */
+
+  /** Chains the corrected offset through the input CharFilter(s). */
   public final int correctOffset(int currentOff) {
     final int corrected = correct(currentOff);
-    return (input instanceof CharFilter) ? ((CharFilter) input).correctOffset(corrected) : corrected;
+    return (input instanceof CharFilter)
+        ? ((CharFilter) input).correctOffset(corrected)
+        : corrected;
   }
 }
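
A hypothetical subclass sketching the minimal CharFilter contract described
above (read(char[], int, int) plus correct(int)); illustrative only and not
part of the patch:

    import java.io.IOException;
    import java.io.Reader;
    import org.apache.lucene.analysis.CharFilter;

    // Pass-through filter: it transforms nothing, so correct() is the identity.
    final class IdentityCharFilter extends CharFilter {
      IdentityCharFilter(Reader input) {
        super(input);
      }

      @Override
      public int read(char[] cbuf, int off, int len) throws IOException {
        return input.read(cbuf, off, len); // a real filter would rewrite characters here
      }

      @Override
      protected int correct(int currentOff) {
        return currentOff; // no characters added or removed, so offsets are unchanged
      }
    }
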
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CharFilterFactory.java b/lucene/core/src/java/org/apache/lucene/analysis/CharFilterFactory.java
index 4a2a1aa..3dddd0d 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CharFilterFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CharFilterFactory.java
@@ -16,14 +16,12 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.Reader;
 import java.util.Map;
 import java.util.Set;
 
 /**
- * Abstract parent class for analysis factories that create {@link CharFilter}
- * instances.
+ * Abstract parent class for analysis factories that create {@link CharFilter} instances.
  *
  * @since 3.1
  */
@@ -31,17 +29,17 @@ public abstract class CharFilterFactory extends AbstractAnalysisFactory {
 
   private static final AnalysisSPILoader<CharFilterFactory> loader =
       new AnalysisSPILoader<>(CharFilterFactory.class);
-  
+
   /** looks up a charfilter by name from context classpath */
-  public static CharFilterFactory forName(String name, Map<String,String> args) {
+  public static CharFilterFactory forName(String name, Map<String, String> args) {
     return loader.newInstance(name, args);
   }
-  
+
   /** looks up a charfilter class by name from context classpath */
   public static Class<? extends CharFilterFactory> lookupClass(String name) {
     return loader.lookupClass(name);
   }
-  
+
   /** returns a list of all available charfilter names */
   public static Set<String> availableCharFilters() {
     return loader.availableServices();
@@ -56,16 +54,15 @@ public abstract class CharFilterFactory extends AbstractAnalysisFactory {
     }
   }
 
-  /** 
-   * Reloads the factory list from the given {@link ClassLoader}.
-   * Changes to the factories are visible after the method ends, all
-   * iterators ({@link #availableCharFilters()},...) stay consistent. 
-   * 
-   * <p><b>NOTE:</b> Only new factories are added, existing ones are
-   * never removed or replaced.
-   * 
-   * <p><em>This method is expensive and should only be called for discovery
-   * of new factories on the given classpath/classloader!</em>
+  /**
+   * Reloads the factory list from the given {@link ClassLoader}. Changes to the factories are
+   * visible after the method ends; all iterators ({@link #availableCharFilters()},...) stay
+   * consistent.
+   *
+   * <p><b>NOTE:</b> Only new factories are added, existing ones are never removed or replaced.
+   *
+   * <p><em>This method is expensive and should only be called for discovery of new factories on the
+   * given classpath/classloader!</em>
    */
   public static void reloadCharFilters(ClassLoader classloader) {
     loader.reload(classloader);
@@ -76,10 +73,8 @@ public abstract class CharFilterFactory extends AbstractAnalysisFactory {
     super();
   }
 
-  /**
-   * Initialize this factory via a set of key-value pairs.
-   */
-  protected CharFilterFactory(Map<String,String> args) {
+  /** Initialize this factory via a set of key-value pairs. */
+  protected CharFilterFactory(Map<String, String> args) {
     super(args);
   }
 
@@ -87,9 +82,9 @@ public abstract class CharFilterFactory extends AbstractAnalysisFactory {
   public abstract Reader create(Reader input);
 
   /**
-   * Normalize the specified input Reader
-   * While the default implementation returns input unchanged,
-   * char filters that should be applied at normalization time can delegate to {@code create} method.
+   * Normalizes the specified input Reader. While the default implementation returns input
+   * unchanged, char filters that should be applied at normalization time can delegate to the
+   * {@code create} method.
    */
   public Reader normalize(Reader input) {
     return input;
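
A usage sketch for the factory SPI above; illustrative only and not part of the
patch. The "htmlStrip" name is an assumption that lucene-analyzers-common is on
the classpath:

    import java.io.Reader;
    import java.io.StringReader;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.analysis.CharFilterFactory;

    public class CharFilterFactoryDemo {
      public static void main(String[] args) {
        // list the SPI names visible on the current classpath
        System.out.println(CharFilterFactory.availableCharFilters());

        // look a factory up by name; it consumes the args it understands
        Map<String, String> params = new HashMap<>();
        CharFilterFactory factory =
            CharFilterFactory.forName("htmlStrip", params); // assumes analyzers-common
        Reader filtered = factory.create(new StringReader("<b>hi</b>"));
      }
    }
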
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CharacterUtils.java b/lucene/core/src/java/org/apache/lucene/analysis/CharacterUtils.java
index 9ed077c..a74f55d 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CharacterUtils.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CharacterUtils.java
@@ -16,12 +16,12 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 import java.io.Reader;
 
 /**
  * Utility class to write tokenizers or token filters.
+ *
  * @lucene.internal
  */
 public final class CharacterUtils {
@@ -29,11 +29,10 @@ public final class CharacterUtils {
   private CharacterUtils() {} // no instantiation
 
   /**
-   * Creates a new {@link CharacterBuffer} and allocates a <code>char[]</code>
-   * of the given bufferSize.
-   * 
-   * @param bufferSize
-   *          the internal char buffer size, must be <code>&gt;= 2</code>
+   * Creates a new {@link CharacterBuffer} and allocates a <code>char[]</code> of the given
+   * bufferSize.
+   *
+   * @param bufferSize the internal char buffer size, must be <code>&gt;= 2</code>
    * @return a new {@link CharacterBuffer} instance.
    */
   public static CharacterBuffer newCharacterBuffer(final int bufferSize) {
@@ -42,11 +41,11 @@ public final class CharacterUtils {
     }
     return new CharacterBuffer(new char[bufferSize], 0, 0);
   }
-  
-  
+
   /**
-   * Converts each unicode codepoint to lowerCase via {@link Character#toLowerCase(int)} starting 
-   * at the given offset.
+   * Converts each unicode codepoint to lowerCase via {@link Character#toLowerCase(int)} starting at
+   * the given offset.
+   *
    * @param buffer the char buffer to lowercase
    * @param offset the offset to start at
    * @param limit the max char in the buffer to lower case
@@ -54,16 +53,17 @@ public final class CharacterUtils {
   public static void toLowerCase(final char[] buffer, final int offset, final int limit) {
     assert buffer.length >= limit;
     assert 0 <= offset && offset <= buffer.length;
-    for (int i = offset; i < limit;) {
-      i += Character.toChars(
-              Character.toLowerCase(
-                  Character.codePointAt(buffer, i, limit)), buffer, i);
-     }
+    for (int i = offset; i < limit; ) {
+      i +=
+          Character.toChars(
+              Character.toLowerCase(Character.codePointAt(buffer, i, limit)), buffer, i);
+    }
   }
 
   /**
-   * Converts each unicode codepoint to UpperCase via {@link Character#toUpperCase(int)} starting 
-   * at the given offset.
+   * Converts each unicode codepoint to UpperCase via {@link Character#toUpperCase(int)} starting at
+   * the given offset.
+   *
    * @param buffer the char buffer to UPPERCASE
    * @param offset the offset to start at
   * @param limit the max char in the buffer to upper case
@@ -71,15 +71,18 @@ public final class CharacterUtils {
   public static void toUpperCase(final char[] buffer, final int offset, final int limit) {
     assert buffer.length >= limit;
     assert 0 <= offset && offset <= buffer.length;
-    for (int i = offset; i < limit;) {
-      i += Character.toChars(
-              Character.toUpperCase(
-                  Character.codePointAt(buffer, i, limit)), buffer, i);
-     }
+    for (int i = offset; i < limit; ) {
+      i +=
+          Character.toChars(
+              Character.toUpperCase(Character.codePointAt(buffer, i, limit)), buffer, i);
+    }
   }
 
-  /** Converts a sequence of Java characters to a sequence of unicode code points.
-   *  @return the number of code points written to the destination buffer */
+  /**
+   * Converts a sequence of Java characters to a sequence of unicode code points.
+   *
+   * @return the number of code points written to the destination buffer
+   */
   public static int toCodePoints(char[] src, int srcOff, int srcLen, int[] dest, int destOff) {
     if (srcLen < 0) {
       throw new IllegalArgumentException("srcLen must be >= 0");
@@ -94,8 +97,11 @@ public final class CharacterUtils {
     return codePointCount;
   }
 
-  /** Converts a sequence of unicode code points to a sequence of Java characters.
-   *  @return the number of chars written to the destination buffer */
+  /**
+   * Converts a sequence of unicode code points to a sequence of Java characters.
+   *
+   * @return the number of chars written to the destination buffer
+   */
   public static int toChars(int[] src, int srcOff, int srcLen, char[] dest, int destOff) {
     if (srcLen < 0) {
       throw new IllegalArgumentException("srcLen must be >= 0");
@@ -108,38 +114,31 @@ public final class CharacterUtils {
   }
 
   /**
-   * Fills the {@link CharacterBuffer} with characters read from the given
-   * reader {@link Reader}. This method tries to read <code>numChars</code>
-   * characters into the {@link CharacterBuffer}, each call to fill will start
-   * filling the buffer from offset <code>0</code> up to <code>numChars</code>.
-   * In case code points can span across 2 java characters, this method may
-   * only fill <code>numChars - 1</code> characters in order not to split in
-   * the middle of a surrogate pair, even if there are remaining characters in
-   * the {@link Reader}.
-   * <p>
-   * This method guarantees
-   * that the given {@link CharacterBuffer} will never contain a high surrogate
-   * character as the last element in the buffer unless it is the last available
-   * character in the reader. In other words, high and low surrogate pairs will
-   * always be preserved across buffer boarders.
-   * </p>
-   * <p>
-   * A return value of <code>false</code> means that this method call exhausted
-   * the reader, but there may be some bytes which have been read, which can be
-   * verified by checking whether <code>buffer.getLength() &gt; 0</code>.
-   * </p>
-   * 
-   * @param buffer
-   *          the buffer to fill.
-   * @param reader
-   *          the reader to read characters from.
-   * @param numChars
-   *          the number of chars to read
-   * @return <code>false</code> if and only if reader.read returned -1 while trying to fill the buffer
-   * @throws IOException
-   *           if the reader throws an {@link IOException}.
+   * Fills the {@link CharacterBuffer} with characters read from the given {@link Reader}. This
+   * method tries to read <code>numChars</code> characters into the {@link CharacterBuffer}; each
+   * call to fill will start filling the buffer from offset <code>0</code> up to <code>numChars
+   * </code>. Since code points can span two Java characters, this method may only fill <code>
+   * numChars - 1</code> characters in order not to split in the middle of a surrogate pair, even
+   * if there are remaining characters in the {@link Reader}.
+   *
+   * <p>This method guarantees that the given {@link CharacterBuffer} will never contain a high
+   * surrogate character as the last element in the buffer unless it is the last available
+   * character in the reader. In other words, high and low surrogate pairs will always be preserved
+   * across buffer borders.
+   *
+   * <p>A return value of <code>false</code> means that this method call exhausted the reader, but
+   * there may be some bytes which have been read, which can be verified by checking whether <code>
+   * buffer.getLength() &gt; 0</code>.
+   *
+   * @param buffer the buffer to fill.
+   * @param reader the reader to read characters from.
+   * @param numChars the number of chars to read
+   * @return <code>false</code> if and only if reader.read returned -1 while trying to fill the
+   *     buffer
+   * @throws IOException if the reader throws an {@link IOException}.
    */
-  public static boolean fill(CharacterBuffer buffer, Reader reader, int numChars) throws IOException {
+  public static boolean fill(CharacterBuffer buffer, Reader reader, int numChars)
+      throws IOException {
     assert buffer.buffer.length >= 2;
     if (numChars < 2 || numChars > buffer.buffer.length) {
       throw new IllegalArgumentException("numChars must be >= 2 and <= the buffer size");
@@ -190,62 +189,54 @@ public final class CharacterUtils {
     return read;
   }
 
-  /**
-   * A simple IO buffer to use with
-   * {@link CharacterUtils#fill(CharacterBuffer, Reader)}.
-   */
+  /** A simple IO buffer to use with {@link CharacterUtils#fill(CharacterBuffer, Reader)}. */
   public static final class CharacterBuffer {
-    
+
     private final char[] buffer;
     private int offset;
     private int length;
     // NOTE: not private so outer class can access without
     // $access methods:
     char lastTrailingHighSurrogate;
-    
+
     CharacterBuffer(char[] buffer, int offset, int length) {
       this.buffer = buffer;
       this.offset = offset;
       this.length = length;
     }
-    
+
     /**
      * Returns the internal buffer
-     * 
+     *
      * @return the buffer
      */
     public char[] getBuffer() {
       return buffer;
     }
-    
+
     /**
      * Returns the data offset in the internal buffer.
-     * 
+     *
      * @return the offset
      */
     public int getOffset() {
       return offset;
     }
-    
+
     /**
-     * Return the length of the data in the internal buffer starting at
-     * {@link #getOffset()}
-     * 
+     * Return the length of the data in the internal buffer starting at {@link #getOffset()}
+     *
      * @return the length
      */
     public int getLength() {
       return length;
     }
-    
-    /**
-     * Resets the CharacterBuffer. All internals are reset to its default
-     * values.
-     */
+
+    /** Resets the CharacterBuffer. All internals are reset to their default values. */
     public void reset() {
       offset = 0;
       length = 0;
       lastTrailingHighSurrogate = 0;
     }
   }
-
 }
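
A usage sketch of the utilities above; CharacterUtils is marked
@lucene.internal, so this is illustrative only and not part of the patch:

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.CharacterUtils;

    public class CharacterUtilsDemo {
      public static void main(String[] args) throws IOException {
        // in-place, codepoint-aware lowercasing (surrogate pairs handled correctly)
        char[] buf = "HeLLo".toCharArray();
        CharacterUtils.toLowerCase(buf, 0, buf.length);
        System.out.println(new String(buf)); // hello

        // fill() reads up to numChars but never splits a surrogate pair
        CharacterUtils.CharacterBuffer cb = CharacterUtils.newCharacterBuffer(16);
        boolean notExhausted = CharacterUtils.fill(cb, new StringReader("some text"), 16);
        System.out.println(new String(cb.getBuffer(), cb.getOffset(), cb.getLength()));
        System.out.println(notExhausted); // false: the reader was drained
      }
    }
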
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java b/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
index b1a9378..9fc24af 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/DelegatingAnalyzerWrapper.java
@@ -18,42 +18,39 @@ package org.apache.lucene.analysis;
 
 import java.io.Reader;
 
-
 /**
- * An analyzer wrapper, that doesn't allow to wrap components or readers.
- * By disallowing it, it means that the thread local resources can be delegated
- * to the delegate analyzer, and not also be allocated on this analyzer.
- * This wrapper class is the base class of all analyzers that just delegate to
- * another analyzer, e.g. per field name.
- * 
- * <p>This solves the problem of per field analyzer wrapper, where it also
- * maintains a thread local per field token stream components, while it can
- * safely delegate those and not also hold these data structures, which can
- * become expensive memory wise.
- * 
- * <p><b>Please note:</b> This analyzer uses a private {@link Analyzer.ReuseStrategy},
- * which is returned by {@link #getReuseStrategy()}. This strategy is used when
- * delegating. If you wrap this analyzer again and reuse this strategy, no
- * delegation is done and the given fallback is used.
+ * An analyzer wrapper that doesn't allow wrapping components or readers. By disallowing that, the
+ * thread-local resources can be delegated to the delegate analyzer rather than also being
+ * allocated on this analyzer. This wrapper class is the base class of all analyzers that just
+ * delegate to another analyzer, e.g. per field name.
+ *
+ * <p>This solves the problem of a per-field analyzer wrapper that also maintains thread-local
+ * per-field token stream components: it can safely delegate those and not also hold these data
+ * structures itself, which can become expensive memory-wise.
+ *
+ * <p><b>Please note:</b> This analyzer uses a private {@link Analyzer.ReuseStrategy}, which is
+ * returned by {@link #getReuseStrategy()}. This strategy is used when delegating. If you wrap this
+ * analyzer again and reuse this strategy, no delegation is done and the given fallback is used.
  *
  * @since 4.10.0
  */
 public abstract class DelegatingAnalyzerWrapper extends AnalyzerWrapper {
-  
+
   /**
    * Constructor.
-   * @param fallbackStrategy is the strategy to use if delegation is not possible
-   *  This is to support the common pattern:
-   *  {@code new OtherWrapper(thisWrapper.getReuseStrategy())} 
+   *
+   * @param fallbackStrategy the strategy to use if delegation is not possible. This is to support
+   *     the common pattern: {@code new OtherWrapper(thisWrapper.getReuseStrategy())}
    */
   protected DelegatingAnalyzerWrapper(ReuseStrategy fallbackStrategy) {
     super(new DelegatingReuseStrategy(fallbackStrategy));
     // häckidy-hick-hack, because we cannot call super() with a reference to "this":
     ((DelegatingReuseStrategy) getReuseStrategy()).wrapper = this;
   }
-  
+
   @Override
-  protected final TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
+  protected final TokenStreamComponents wrapComponents(
+      String fieldName, TokenStreamComponents components) {
     return super.wrapComponents(fieldName, components);
   }
 
@@ -75,11 +72,11 @@ public abstract class DelegatingAnalyzerWrapper extends AnalyzerWrapper {
   private static final class DelegatingReuseStrategy extends ReuseStrategy {
     DelegatingAnalyzerWrapper wrapper;
     private final ReuseStrategy fallbackStrategy;
-    
+
     DelegatingReuseStrategy(ReuseStrategy fallbackStrategy) {
       this.fallbackStrategy = fallbackStrategy;
     }
-    
+
     @Override
     public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) {
       if (analyzer == wrapper) {
@@ -91,14 +88,17 @@ public abstract class DelegatingAnalyzerWrapper extends AnalyzerWrapper {
     }
 
     @Override
-    public void setReusableComponents(Analyzer analyzer, String fieldName,  TokenStreamComponents components) {
+    public void setReusableComponents(
+        Analyzer analyzer, String fieldName, TokenStreamComponents components) {
       if (analyzer == wrapper) {
         final Analyzer wrappedAnalyzer = wrapper.getWrappedAnalyzer(fieldName);
-        wrappedAnalyzer.getReuseStrategy().setReusableComponents(wrappedAnalyzer, fieldName, components);
+        wrappedAnalyzer
+            .getReuseStrategy()
+            .setReusableComponents(wrappedAnalyzer, fieldName, components);
       } else {
         fallbackStrategy.setReusableComponents(analyzer, fieldName, components);
       }
     }
-  };
-  
+  }
 }
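
A hypothetical subclass sketching the delegation pattern above, in the spirit
of PerFieldAnalyzerWrapper; illustrative only and not part of the patch:

    import java.util.Map;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;

    // Delegates to a per-field analyzer, falling back to a default one.
    final class PerFieldDelegatingAnalyzer extends DelegatingAnalyzerWrapper {
      private final Analyzer defaultAnalyzer;
      private final Map<String, Analyzer> perField;

      PerFieldDelegatingAnalyzer(Analyzer defaultAnalyzer, Map<String, Analyzer> perField) {
        super(PER_FIELD_REUSE_STRATEGY); // fallback when delegation is not possible
        this.defaultAnalyzer = defaultAnalyzer;
        this.perField = perField;
      }

      @Override
      protected Analyzer getWrappedAnalyzer(String fieldName) {
        return perField.getOrDefault(fieldName, defaultAnalyzer);
      }
    }
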
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/FilteringTokenFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/FilteringTokenFilter.java
index e942224..866311f 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/FilteringTokenFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/FilteringTokenFilter.java
@@ -16,31 +16,33 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
-
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 
 /**
- * Abstract base class for TokenFilters that may remove tokens.
- * You have to implement {@link #accept} and return a boolean if the current
- * token should be preserved. {@link #incrementToken} uses this method
- * to decide if a token should be passed to the caller.
+ * Abstract base class for TokenFilters that may remove tokens. You have to implement {@link
+ * #accept} and return a boolean indicating whether the current token should be preserved. {@link
+ * #incrementToken} uses this method to decide if a token should be passed to the caller.
  */
 public abstract class FilteringTokenFilter extends TokenFilter {
 
-  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+  private final PositionIncrementAttribute posIncrAtt =
+      addAttribute(PositionIncrementAttribute.class);
   private int skippedPositions;
 
   /**
    * Create a new {@link FilteringTokenFilter}.
-   * @param in      the {@link TokenStream} to consume
+   *
+   * @param in the {@link TokenStream} to consume
    */
   public FilteringTokenFilter(TokenStream in) {
     super(in);
   }
 
-  /** Override this method and return if the current input token should be returned by {@link #incrementToken}. */
+  /**
+   * Override this method and return whether the current input token should be returned by {@link
+   * #incrementToken}.
+   */
   protected abstract boolean accept() throws IOException;
 
   @Override
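
A hypothetical subclass showing the accept() contract above, similar in spirit
to LengthFilter; illustrative only and not part of the patch:

    import org.apache.lucene.analysis.FilteringTokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    // Keeps only tokens of at least minLen chars; shorter tokens are dropped,
    // and the base class accumulates their position increments.
    final class MinLengthFilter extends FilteringTokenFilter {
      private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
      private final int minLen;

      MinLengthFilter(TokenStream in, int minLen) {
        super(in);
        this.minLen = minLen;
      }

      @Override
      protected boolean accept() {
        return termAtt.length() >= minLen;
      }
    }
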
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/GraphTokenFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/GraphTokenFilter.java
index 9c1e02e..c48b28f 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/GraphTokenFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/GraphTokenFilter.java
@@ -22,7 +22,6 @@ import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Deque;
 import java.util.List;
-
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
@@ -31,28 +30,23 @@ import org.apache.lucene.util.AttributeSource;
 /**
  * An abstract TokenFilter that exposes its input stream as a graph
  *
- * Call {@link #incrementBaseToken()} to move the root of the graph to the next
- * position in the TokenStream, {@link #incrementGraphToken()} to move along
- * the current graph, and {@link #incrementGraph()} to reset to the next graph
- * based at the current root.
+ * <p>Call {@link #incrementBaseToken()} to move the root of the graph to the next position in the
+ * TokenStream, {@link #incrementGraphToken()} to move along the current graph, and {@link
+ * #incrementGraph()} to reset to the next graph based at the current root.
  *
- * For example, given the stream 'a b/c:2 d e`, then with the base token at
- * 'a', incrementGraphToken() will produce the stream 'a b d e', and then
- * after calling incrementGraph() will produce the stream 'a c e'.
+ * <p>For example, given the stream 'a b/c:2 d e', then with the base token at 'a',
+ * incrementGraphToken() will produce the stream 'a b d e', and then after calling incrementGraph()
+ * it will produce the stream 'a c e'.
  */
 public abstract class GraphTokenFilter extends TokenFilter {
 
   private final Deque<Token> tokenPool = new ArrayDeque<>();
   private final List<Token> currentGraph = new ArrayList<>();
 
-  /**
-   * The maximum permitted number of routes through a graph
-   */
+  /** The maximum permitted number of routes through a graph */
   public static final int MAX_GRAPH_STACK_SIZE = 1000;
 
-  /**
-   * The maximum permitted read-ahead in the token stream
-   */
+  /** The maximum permitted read-ahead in the token stream */
   public static final int MAX_TOKEN_CACHE_SIZE = 100;
 
   private Token baseToken;
@@ -67,9 +61,7 @@ public abstract class GraphTokenFilter extends TokenFilter {
   private final PositionIncrementAttribute posIncAtt;
   private final OffsetAttribute offsetAtt;
 
-  /**
-   * Create a new GraphTokenFilter
-   */
+  /** Create a new GraphTokenFilter */
   public GraphTokenFilter(TokenStream input) {
     super(input);
     this.posIncAtt = input.addAttribute(PositionIncrementAttribute.class);
@@ -149,7 +141,8 @@ public abstract class GraphTokenFilter extends TokenFilter {
   /**
    * Return the number of trailing positions at the end of the graph
    *
-   * NB this should only be called after {@link #incrementGraphToken()} has returned {@code false}
+   * <p>NB this should only be called after {@link #incrementGraphToken()} has returned {@code
+   * false}
    */
   public int getTrailingPositions() {
     return trailingPositions;
@@ -161,8 +154,7 @@ public abstract class GraphTokenFilter extends TokenFilter {
       input.end();
       trailingPositions = posIncAtt.getPositionIncrement();
       finalOffsets = offsetAtt.endOffset();
-    }
-    else {
+    } else {
       endAttributes();
       this.posIncAtt.setPositionIncrement(trailingPositions);
       this.offsetAtt.setOffset(finalOffsets, finalOffsets);
@@ -200,8 +192,7 @@ public abstract class GraphTokenFilter extends TokenFilter {
   }
 
   private void recycleToken(Token token) {
-    if (token == null)
-      return;
+    if (token == null) return;
     token.nextToken = null;
     tokenPool.add(token);
   }
@@ -280,5 +271,4 @@ public abstract class GraphTokenFilter extends TokenFilter {
       return attSource.toString();
     }
   }
-
 }
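
A sketch of the traversal protocol above as it would look inside a hypothetical
subclass; attribute capture is deliberately elided (see FixedShingleFilter for
a real subclass), so this is illustrative only and not part of the patch:

    import java.io.IOException;
    import org.apache.lucene.analysis.GraphTokenFilter;
    import org.apache.lucene.analysis.TokenStream;

    final class RouteWalkingFilter extends GraphTokenFilter {
      RouteWalkingFilter(TokenStream input) {
        super(input);
      }

      @Override
      public boolean incrementToken() throws IOException {
        if (incrementBaseToken() == false) {
          return false; // the wrapped stream is exhausted
        }
        do {
          while (incrementGraphToken()) {
            // the attribute source now reflects the next token on the current route
          }
        } while (incrementGraph()); // start the next route at the same base token
        return true;
      }
    }
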
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/LowerCaseFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/LowerCaseFilter.java
index e89678b..e9c3796 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/LowerCaseFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/LowerCaseFilter.java
@@ -16,32 +16,27 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
-
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 
-/**
- * Normalizes token text to lower case.
- */
+/** Normalizes token text to lower case. */
 public class LowerCaseFilter extends TokenFilter {
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-  
+
   /**
    * Create a new LowerCaseFilter, that normalizes token text to lower case.
-   * 
+   *
    * @param in TokenStream to filter
    */
   public LowerCaseFilter(TokenStream in) {
     super(in);
   }
-  
+
   @Override
   public final boolean incrementToken() throws IOException {
     if (input.incrementToken()) {
       CharacterUtils.toLowerCase(termAtt.buffer(), 0, termAtt.length());
       return true;
-    } else
-      return false;
+    } else return false;
   }
 }
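
A minimal analyzer sketch wiring LowerCaseFilter into a chain; StandardTokenizer
from lucene-core is assumed, and the class is illustrative only, not part of the
patch:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.standard.StandardTokenizer;

    // Standard tokenization followed by codepoint-aware lowercasing.
    public class LowercasingAnalyzer extends Analyzer {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer source = new StandardTokenizer();
        TokenStream result = new LowerCaseFilter(source);
        return new TokenStreamComponents(source, result);
      }
    }
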
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java b/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java
index c350534..2ff8f41 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java
@@ -18,18 +18,20 @@ package org.apache.lucene.analysis;
 
 import java.io.Reader;
 
-
-/** Internal class to enable reuse of the string reader by {@link Analyzer#tokenStream(String,String)} */
+/**
+ * Internal class to enable reuse of the string reader by {@link
+ * Analyzer#tokenStream(String,String)}
+ */
 final class ReusableStringReader extends Reader {
   private int pos = 0, size = 0;
   private String s = null;
-  
+
   void setValue(String s) {
     this.s = s;
     this.size = s.length();
     this.pos = 0;
   }
-  
+
   @Override
   public int read() {
     if (pos < size) {
@@ -39,12 +41,12 @@ final class ReusableStringReader extends Reader {
       return -1;
     }
   }
-  
+
   @Override
   public int read(char[] c, int off, int len) {
     if (pos < size) {
-      len = Math.min(len, size-pos);
-      s.getChars(pos, pos+len, c, off);
+      len = Math.min(len, size - pos);
+      s.getChars(pos, pos + len, c, off);
       pos += len;
       return len;
     } else {
@@ -52,7 +54,7 @@ final class ReusableStringReader extends Reader {
       return -1;
     }
   }
-  
+
   @Override
   public void close() {
     pos = size; // this prevents NPE when reading after close!
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/StopFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/StopFilter.java
index 8cb562b..403c004 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/StopFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/StopFilter.java
@@ -16,28 +16,21 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.util.Arrays;
 import java.util.List;
-
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 
-/**
- * Removes stop words from a token stream.
- */
+/** Removes stop words from a token stream. */
 public class StopFilter extends FilteringTokenFilter {
 
   private final CharArraySet stopWords;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-  
+
   /**
-   * Constructs a filter which removes words from the input TokenStream that are
-   * named in the Set.
-   * 
-   * @param in
-   *          Input stream
-   * @param stopWords
-   *          A {@link CharArraySet} representing the stopwords.
+   * Constructs a filter which removes words from the input TokenStream that are named in the Set.
+   *
+   * @param in Input stream
+   * @param stopWords A {@link CharArraySet} representing the stopwords.
    * @see #makeStopSet(java.lang.String...)
    */
   public StopFilter(TokenStream in, CharArraySet stopWords) {
@@ -46,63 +39,61 @@ public class StopFilter extends FilteringTokenFilter {
   }
 
   /**
-   * Builds a Set from an array of stop words,
-   * appropriate for passing into the StopFilter constructor.
-   * This permits this stopWords construction to be cached once when
-   * an Analyzer is constructed.
-   * 
+   * Builds a Set from an array of stop words, appropriate for passing into the StopFilter
+   * constructor. This permits the stopWords construction to be cached once when an Analyzer is
+   * constructed.
+   *
    * @param stopWords An array of stopwords
    * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase
    */
   public static CharArraySet makeStopSet(String... stopWords) {
     return makeStopSet(stopWords, false);
   }
-  
+
   /**
-   * Builds a Set from an array of stop words,
-   * appropriate for passing into the StopFilter constructor.
-   * This permits this stopWords construction to be cached once when
-   * an Analyzer is constructed.
-   * 
-   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords
+   * Builds a Set from a List of stop words, appropriate for passing into the StopFilter
+   * constructor. This permits the stopWords construction to be cached once when an Analyzer is
+   * constructed.
+   *
+   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the
+   *     stopwords
    * @return A Set ({@link CharArraySet}) containing the words
    * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase
    */
   public static CharArraySet makeStopSet(List<?> stopWords) {
     return makeStopSet(stopWords, false);
   }
-    
+
   /**
    * Creates a stopword set from the given stopword array.
-   * 
+   *
    * @param stopWords An array of stopwords
-   * @param ignoreCase If true, all words are lower cased first.  
+   * @param ignoreCase If true, all words are lower cased first.
    * @return a Set containing the words
-   */    
+   */
   public static CharArraySet makeStopSet(String[] stopWords, boolean ignoreCase) {
     CharArraySet stopSet = new CharArraySet(stopWords.length, ignoreCase);
     stopSet.addAll(Arrays.asList(stopWords));
     return stopSet;
   }
-  
+
   /**
    * Creates a stopword set from the given stopword list.
-   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords
+   *
+   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the
+   *     stopwords
    * @param ignoreCase if true, all words are lower cased first
    * @return A Set ({@link CharArraySet}) containing the words
    */
-  public static CharArraySet makeStopSet(List<?> stopWords, boolean ignoreCase){
+  public static CharArraySet makeStopSet(List<?> stopWords, boolean ignoreCase) {
     CharArraySet stopSet = new CharArraySet(stopWords.size(), ignoreCase);
     stopSet.addAll(stopWords);
     return stopSet;
   }
-  
-  /**
-   * Returns the next input Token whose term() is not a stop word.
-   */
+
+  /** Returns the next input Token whose term() is not a stop word. */
   @Override
   protected boolean accept() {
     return !stopWords.contains(termAtt.buffer(), 0, termAtt.length());
   }
-
 }
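
A usage sketch of makeStopSet() and StopFilter above; StandardTokenizer from
lucene-core is assumed, and the demo class is illustrative only, not part of
the patch:

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class StopFilterDemo {
      public static void main(String[] args) throws IOException {
        // build the stop set once (e.g. when the Analyzer is constructed) and reuse it
        CharArraySet stops = StopFilter.makeStopSet("the", "a");

        StandardTokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader("the quick fox"));
        try (TokenStream ts = new StopFilter(tokenizer, stops)) {
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();
          while (ts.incrementToken()) {
            System.out.println(term); // prints "quick" then "fox"
          }
          ts.end();
        }
      }
    }
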
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/StopwordAnalyzerBase.java b/lucene/core/src/java/org/apache/lucene/analysis/StopwordAnalyzerBase.java
index 452ef9a..77675fd 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/StopwordAnalyzerBase.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/StopwordAnalyzerBase.java
@@ -21,28 +21,22 @@ import java.io.Reader;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
-
 import org.apache.lucene.util.IOUtils;
 
 /**
- * Base class for Analyzers that need to make use of stopword sets. 
- *
+ * Base class for Analyzers that need to make use of stopword sets.
  *
  * @since 3.1
  */
 public abstract class StopwordAnalyzerBase extends Analyzer {
 
-  /**
-   * An immutable stopword set
-   */
+  /** An immutable stopword set */
   protected final CharArraySet stopwords;
 
   /**
-   * Returns the analyzer's stopword set or an empty set if the analyzer has no
-   * stopwords
-   * 
-   * @return the analyzer's stopword set or an empty set if the analyzer has no
-   *         stopwords
+   * Returns the analyzer's stopword set or an empty set if the analyzer has no stopwords
+   *
+   * @return the analyzer's stopword set or an empty set if the analyzer has no stopwords
    */
   public CharArraySet getStopwordSet() {
     return stopwords;
@@ -50,63 +44,56 @@ public abstract class StopwordAnalyzerBase extends Analyzer {
 
   /**
    * Creates a new instance initialized with the given stopword set
-   * 
-   * @param stopwords
-   *          the analyzer's stopword set
+   *
+   * @param stopwords the analyzer's stopword set
    */
   protected StopwordAnalyzerBase(final CharArraySet stopwords) {
     // analyzers should use char array set for stopwords!
-    this.stopwords = stopwords == null ? CharArraySet.EMPTY_SET : CharArraySet
-        .unmodifiableSet(CharArraySet.copy(stopwords));
+    this.stopwords =
+        stopwords == null
+            ? CharArraySet.EMPTY_SET
+            : CharArraySet.unmodifiableSet(CharArraySet.copy(stopwords));
   }
 
-  /**
-   * Creates a new Analyzer with an empty stopword set
-   */
+  /** Creates a new Analyzer with an empty stopword set */
   protected StopwordAnalyzerBase() {
     this(null);
   }
 
   /**
-   * Creates a CharArraySet from a file resource associated with a class. (See
-   * {@link Class#getResourceAsStream(String)}).
-   * 
-   * @param ignoreCase
-   *          <code>true</code> if the set should ignore the case of the
-   *          stopwords, otherwise <code>false</code>
-   * @param aClass
-   *          a class that is associated with the given stopwordResource
-   * @param resource
-   *          name of the resource file associated with the given class
-   * @param comment
-   *          comment string to ignore in the stopword file
-   * @return a CharArraySet containing the distinct stopwords from the given
-   *         file
-   * @throws IOException
-   *           if loading the stopwords throws an {@link IOException}
+   * Creates a CharArraySet from a file resource associated with a class. (See {@link
+   * Class#getResourceAsStream(String)}).
+   *
+   * @param ignoreCase <code>true</code> if the set should ignore the case of the stopwords,
+   *     otherwise <code>false</code>
+   * @param aClass a class that is associated with the given stopwordResource
+   * @param resource name of the resource file associated with the given class
+   * @param comment comment string to ignore in the stopword file
+   * @return a CharArraySet containing the distinct stopwords from the given file
+   * @throws IOException if loading the stopwords throws an {@link IOException}
    */
-  protected static CharArraySet loadStopwordSet(final boolean ignoreCase,
-      final Class<? extends Analyzer> aClass, final String resource,
-      final String comment) throws IOException {
+  protected static CharArraySet loadStopwordSet(
+      final boolean ignoreCase,
+      final Class<? extends Analyzer> aClass,
+      final String resource,
+      final String comment)
+      throws IOException {
     Reader reader = null;
     try {
-      reader = IOUtils.getDecodingReader(aClass.getResourceAsStream(resource), StandardCharsets.UTF_8);
+      reader =
+          IOUtils.getDecodingReader(aClass.getResourceAsStream(resource), StandardCharsets.UTF_8);
       return WordlistLoader.getWordSet(reader, comment, new CharArraySet(16, ignoreCase));
     } finally {
       IOUtils.close(reader);
     }
-    
   }
-  
+
   /**
    * Creates a CharArraySet from a path.
-   * 
-   * @param stopwords
-   *          the stopwords file to load
-   * @return a CharArraySet containing the distinct stopwords from the given
-   *         file
-   * @throws IOException
-   *           if loading the stopwords throws an {@link IOException}
+   *
+   * @param stopwords the stopwords file to load
+   * @return a CharArraySet containing the distinct stopwords from the given file
+   * @throws IOException if loading the stopwords throws an {@link IOException}
    */
   protected static CharArraySet loadStopwordSet(Path stopwords) throws IOException {
     Reader reader = null;
@@ -117,17 +104,13 @@ public abstract class StopwordAnalyzerBase extends Analyzer {
       IOUtils.close(reader);
     }
   }
-  
+
   /**
    * Creates a CharArraySet from a file.
-   * 
-   * @param stopwords
-   *          the stopwords reader to load
-   * 
-   * @return a CharArraySet containing the distinct stopwords from the given
-   *         reader
-   * @throws IOException
-   *           if loading the stopwords throws an {@link IOException}
+   *
+   * @param stopwords the stopwords reader to load
+   * @return a CharArraySet containing the distinct stopwords from the given reader
+   * @throws IOException if loading the stopwords throws an {@link IOException}
    */
   protected static CharArraySet loadStopwordSet(Reader stopwords) throws IOException {
     try {
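
A hedged sketch of how a subclass typically uses the protected loadStopwordSet()
helpers above; the resource name "mystops.txt" and the analyzer class are
hypothetical, and LetterTokenizer is assumed from lucene-analyzers-common:

    import java.io.IOException;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.StopwordAnalyzerBase;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.LetterTokenizer;

    public final class MyStopAnalyzer extends StopwordAnalyzerBase {
      public MyStopAnalyzer() throws IOException {
        // Loads "mystops.txt" from this class's package, ignoring case and
        // treating "#" as the comment prefix.
        super(loadStopwordSet(true, MyStopAnalyzer.class, "mystops.txt", "#"));
      }

      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer source = new LetterTokenizer();
        return new TokenStreamComponents(source, new StopFilter(source, stopwords));
      }
    }
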
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenFilter.java
index c097c26..9e05e90 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenFilter.java
@@ -16,14 +16,15 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 
-/** A TokenFilter is a TokenStream whose input is another TokenStream.
-  <p>
-  This is an abstract class; subclasses must override {@link #incrementToken()}.
-  @see TokenStream
-  */
+/**
+ * A TokenFilter is a TokenStream whose input is another TokenStream.
+ *
+ * <p>This is an abstract class; subclasses must override {@link #incrementToken()}.
+ *
+ * @see TokenStream
+ */
 public abstract class TokenFilter extends TokenStream {
   /** The source of tokens for this filter. */
   protected final TokenStream input;
@@ -33,25 +34,23 @@ public abstract class TokenFilter extends TokenStream {
     super(input);
     this.input = input;
   }
-  
-  /** 
+
+  /**
    * {@inheritDoc}
-   * <p> 
-   * <b>NOTE:</b> 
-   * The default implementation chains the call to the input TokenStream, so
-   * be sure to call <code>super.end()</code> first when overriding this method.
+   *
+   * <p><b>NOTE:</b> The default implementation chains the call to the input TokenStream, so be sure
+   * to call <code>super.end()</code> first when overriding this method.
    */
   @Override
   public void end() throws IOException {
     input.end();
   }
-  
+
   /**
    * {@inheritDoc}
-   * <p>
-   * <b>NOTE:</b> 
-   * The default implementation chains the call to the input TokenStream, so
-   * be sure to call <code>super.close()</code> when overriding this method.
+   *
+   * <p><b>NOTE:</b> The default implementation chains the call to the input TokenStream, so be sure
+   * to call <code>super.close()</code> when overriding this method.
    */
   @Override
   public void close() throws IOException {
@@ -60,10 +59,9 @@ public abstract class TokenFilter extends TokenStream {
 
   /**
    * {@inheritDoc}
-   * <p>
-   * <b>NOTE:</b> 
-   * The default implementation chains the call to the input TokenStream, so
-   * be sure to call <code>super.reset()</code> when overriding this method.
+   *
+   * <p><b>NOTE:</b> The default implementation chains the call to the input TokenStream, so be sure
+   * to call <code>super.reset()</code> when overriding this method.
    */
   @Override
   public void reset() throws IOException {
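
The end()/close()/reset() chaining rules documented above are easy to get
wrong; a small illustrative filter (not part of this change) that counts
tokens and chains correctly:

    import java.io.IOException;
    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;

    public final class CountingFilter extends TokenFilter {
      private int count;

      public CountingFilter(TokenStream input) {
        super(input);
      }

      @Override
      public boolean incrementToken() throws IOException {
        if (input.incrementToken()) {
          count++;
          return true;
        }
        return false;
      }

      @Override
      public void reset() throws IOException {
        super.reset(); // chains to the input stream, as documented above
        count = 0;
      }

      public int getCount() {
        return count;
      }
    }
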
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenFilterFactory.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenFilterFactory.java
index 022e80f..57cbbfe 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenFilterFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenFilterFactory.java
@@ -16,13 +16,12 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.util.Map;
 import java.util.Set;
 
 /**
- * Abstract parent class for analysis factories that create {@link org.apache.lucene.analysis.TokenFilter}
- * instances.
+ * Abstract parent class for analysis factories that create {@link
+ * org.apache.lucene.analysis.TokenFilter} instances.
  *
  * @since 3.1
  */
@@ -32,15 +31,15 @@ public abstract class TokenFilterFactory extends AbstractAnalysisFactory {
       new AnalysisSPILoader<>(TokenFilterFactory.class);
 
   /** looks up a tokenfilter by name from context classpath */
-  public static TokenFilterFactory forName(String name, Map<String,String> args) {
+  public static TokenFilterFactory forName(String name, Map<String, String> args) {
     return loader.newInstance(name, args);
   }
-  
+
   /** looks up a tokenfilter class by name from context classpath */
   public static Class<? extends TokenFilterFactory> lookupClass(String name) {
     return loader.lookupClass(name);
   }
-  
+
   /** returns a list of all available tokenfilter names from context classpath */
   public static Set<String> availableTokenFilters() {
     return loader.availableServices();
@@ -55,30 +54,27 @@ public abstract class TokenFilterFactory extends AbstractAnalysisFactory {
     }
   }
 
-  /** 
-   * Reloads the factory list from the given {@link ClassLoader}.
-   * Changes to the factories are visible after the method ends, all
-   * iterators ({@link #availableTokenFilters()},...) stay consistent. 
-   * 
-   * <p><b>NOTE:</b> Only new factories are added, existing ones are
-   * never removed or replaced.
-   * 
-   * <p><em>This method is expensive and should only be called for discovery
-   * of new factories on the given classpath/classloader!</em>
+  /**
+   * Reloads the factory list from the given {@link ClassLoader}. Changes to the factories are
+   * visible after the method ends, all iterators ({@link #availableTokenFilters()},...) stay
+   * consistent.
+   *
+   * <p><b>NOTE:</b> Only new factories are added, existing ones are never removed or replaced.
+   *
+   * <p><em>This method is expensive and should only be called for discovery of new factories on the
+   * given classpath/classloader!</em>
    */
   public static void reloadTokenFilters(ClassLoader classloader) {
     loader.reload(classloader);
   }
-  
+
   /** Default ctor for compatibility with SPI */
   protected TokenFilterFactory() {
     super();
   }
 
-  /**
-   * Initialize this factory via a set of key-value pairs.
-   */
-  protected TokenFilterFactory(Map<String,String> args) {
+  /** Initialize this factory via a set of key-value pairs. */
+  protected TokenFilterFactory(Map<String, String> args) {
     super(args);
   }
 
@@ -86,9 +82,9 @@ public abstract class TokenFilterFactory extends AbstractAnalysisFactory {
   public abstract TokenStream create(TokenStream input);
 
   /**
-   * Normalize the specified input TokenStream
-   * While the default implementation returns input unchanged,
-   * filters that should be applied at normalization time can delegate to {@code create} method.
+   * Normalize the specified input TokenStream. While the default implementation returns the input
+   * unchanged, filters that should be applied at normalization time can delegate to the {@code
+   * create} method.
    */
   public TokenStream normalize(TokenStream input) {
     return input;
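
A sketch of the SPI lookup this factory class provides; the name "lowercase"
assumes LowerCaseFilterFactory from lucene-analyzers-common is on the context
classpath:

    import java.util.HashMap;
    import org.apache.lucene.analysis.TokenFilterFactory;

    public class FactoryLookupDemo {
      public static void main(String[] args) {
        // Every tokenfilter name visible on the context classpath.
        System.out.println(TokenFilterFactory.availableTokenFilters());
        // Look up and instantiate a factory by its SPI name. The args map is
        // mutable because the factory consumes the entries it recognizes.
        TokenFilterFactory lower = TokenFilterFactory.forName("lowercase", new HashMap<>());
        System.out.println(lower.getClass().getName());
      }
    }
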
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
index a19d31d..0381792 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenStream.java
@@ -16,11 +16,9 @@
  */
 package org.apache.lucene.analysis;
 
-
-import java.io.IOException;
 import java.io.Closeable;
+import java.io.IOException;
 import java.lang.reflect.Modifier;
-
 import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -31,76 +29,71 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeSource;
 
 /**
- * A <code>TokenStream</code> enumerates the sequence of tokens, either from
- * {@link Field}s of a {@link Document} or from query text.
- * <p>
- * This is an abstract class; concrete subclasses are:
+ * A <code>TokenStream</code> enumerates the sequence of tokens, either from {@link Field}s of a
+ * {@link Document} or from query text.
+ *
+ * <p>This is an abstract class; concrete subclasses are:
+ *
  * <ul>
- * <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and
- * <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another
- * <code>TokenStream</code>.
+ *   <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and
+ *   <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another <code>TokenStream
+ *       </code>.
  * </ul>
- * <code>TokenStream</code> extends {@link AttributeSource}, which provides
- * access to all of the token {@link Attribute}s for the <code>TokenStream</code>.
- * Note that only one instance per {@link AttributeImpl} is created and reused
- * for every token. This approach reduces object creation and allows local
- * caching of references to the {@link AttributeImpl}s. See
- * {@link #incrementToken()} for further details.
- * <p>
- * <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
+ *
+ * <code>TokenStream</code> extends {@link AttributeSource}, which provides access to all of the
+ * token {@link Attribute}s for the <code>TokenStream</code>. Note that only one instance per {@link
+ * AttributeImpl} is created and reused for every token. This approach reduces object creation and
+ * allows local caching of references to the {@link AttributeImpl}s. See {@link #incrementToken()}
+ * for further details.
+ *
+ * <p><b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
+ *
  * <ol>
- * <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
- * attributes to/from the {@link AttributeSource}.
- * <li>The consumer calls {@link TokenStream#reset()}.
- * <li>The consumer retrieves attributes from the stream and stores local
- * references to all attributes it wants to access.
- * <li>The consumer calls {@link #incrementToken()} until it returns false
- * consuming the attributes after each call.
- * <li>The consumer calls {@link #end()} so that any end-of-stream operations
- * can be performed.
- * <li>The consumer calls {@link #close()} to release any resource when finished
- * using the <code>TokenStream</code>.
+ *   <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get attributes
+ *       to/from the {@link AttributeSource}.
+ *   <li>The consumer calls {@link TokenStream#reset()}.
+ *   <li>The consumer retrieves attributes from the stream and stores local references to all
+ *       attributes it wants to access.
+ *   <li>The consumer calls {@link #incrementToken()} until it returns false consuming the
+ *       attributes after each call.
+ *   <li>The consumer calls {@link #end()} so that any end-of-stream operations can be performed.
+ *   <li>The consumer calls {@link #close()} to release any resource when finished using the <code>
+ *       TokenStream</code>.
  * </ol>
- * To make sure that filters and consumers know which attributes are available,
- * the attributes must be added during instantiation. Filters and consumers are
- * not required to check for availability of attributes in
- * {@link #incrementToken()}.
- * <p>
- * You can find some example code for the new API in the analysis package level
- * Javadoc.
- * <p>
- * Sometimes it is desirable to capture a current state of a <code>TokenStream</code>,
- * e.g., for buffering purposes (see {@link CachingTokenFilter},
- * TeeSinkTokenFilter). For this usecase
- * {@link AttributeSource#captureState} and {@link AttributeSource#restoreState}
- * can be used.
- * <p>The {@code TokenStream}-API in Lucene is based on the decorator pattern.
- * Therefore all non-abstract subclasses must be final or have at least a final
- * implementation of {@link #incrementToken}! This is checked when Java
- * assertions are enabled.
+ *
+ * To make sure that filters and consumers know which attributes are available, the attributes must
+ * be added during instantiation. Filters and consumers are not required to check for availability
+ * of attributes in {@link #incrementToken()}.
+ *
+ * <p>You can find some example code for the new API in the analysis package level Javadoc.
+ *
+ * <p>Sometimes it is desirable to capture a current state of a <code>TokenStream</code>, e.g., for
+ * buffering purposes (see {@link CachingTokenFilter}, TeeSinkTokenFilter). For this use case {@link
+ * AttributeSource#captureState} and {@link AttributeSource#restoreState} can be used.
+ *
+ * <p>The {@code TokenStream}-API in Lucene is based on the decorator pattern. Therefore all
+ * non-abstract subclasses must be final or have at least a final implementation of {@link
+ * #incrementToken}! This is checked when Java assertions are enabled.
  */
 public abstract class TokenStream extends AttributeSource implements Closeable {
-  
+
   /** Default {@link AttributeFactory} instance that should be used for TokenStreams. */
   public static final AttributeFactory DEFAULT_TOKEN_ATTRIBUTE_FACTORY =
-    AttributeFactory.getStaticImplementation(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, PackedTokenAttributeImpl.class);
+      AttributeFactory.getStaticImplementation(
+          AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, PackedTokenAttributeImpl.class);
 
-  /**
-   * A TokenStream using the default attribute factory.
-   */
+  /** A TokenStream using the default attribute factory. */
   protected TokenStream() {
     super(DEFAULT_TOKEN_ATTRIBUTE_FACTORY);
     assert assertFinal();
   }
-  
-  /**
-   * A TokenStream that uses the same attributes as the supplied one.
-   */
+
+  /** A TokenStream that uses the same attributes as the supplied one. */
   protected TokenStream(AttributeSource input) {
     super(input);
     assert assertFinal();
   }
-  
+
   /**
    * A TokenStream using the supplied AttributeFactory for creating new {@link Attribute} instances.
    */
@@ -108,65 +101,60 @@ public abstract class TokenStream extends AttributeSource implements Closeable {
     super(factory);
     assert assertFinal();
   }
-  
+
   private boolean assertFinal() {
     try {
       final Class<?> clazz = getClass();
-      if (!clazz.desiredAssertionStatus())
-        return true;
-      assert clazz.isAnonymousClass() ||
-        (clazz.getModifiers() & (Modifier.FINAL | Modifier.PRIVATE)) != 0 ||
-        Modifier.isFinal(clazz.getMethod("incrementToken").getModifiers()) :
-        "TokenStream implementation classes or at least their incrementToken() implementation must be final";
+      if (!clazz.desiredAssertionStatus()) return true;
+      assert clazz.isAnonymousClass()
+              || (clazz.getModifiers() & (Modifier.FINAL | Modifier.PRIVATE)) != 0
+              || Modifier.isFinal(clazz.getMethod("incrementToken").getModifiers())
+          : "TokenStream implementation classes or at least their incrementToken() implementation must be final";
       return true;
     } catch (NoSuchMethodException nsme) {
       return false;
     }
   }
-  
+
   /**
-   * Consumers (i.e., {@link IndexWriter}) use this method to advance the stream to
-   * the next token. Implementing classes must implement this method and update
-   * the appropriate {@link AttributeImpl}s with the attributes of the next
-   * token.
-   * <P>
-   * The producer must make no assumptions about the attributes after the method
-   * has been returned: the caller may arbitrarily change it. If the producer
-   * needs to preserve the state for subsequent calls, it can use
-   * {@link #captureState} to create a copy of the current attribute state.
-   * <p>
-   * This method is called for every token of a document, so an efficient
-   * implementation is crucial for good performance. To avoid calls to
-   * {@link #addAttribute(Class)} and {@link #getAttribute(Class)},
-   * references to all {@link AttributeImpl}s that this stream uses should be
+   * Consumers (i.e., {@link IndexWriter}) use this method to advance the stream to the next token.
+   * Implementing classes must implement this method and update the appropriate {@link
+   * AttributeImpl}s with the attributes of the next token.
+   *
+   * <p>The producer must make no assumptions about the attributes after the method has returned:
+   * the caller may arbitrarily change them. If the producer needs to preserve the state for
+   * subsequent calls, it can use {@link #captureState} to create a copy of the current attribute
+   * state.
+   *
+   * <p>This method is called for every token of a document, so an efficient implementation is
+   * crucial for good performance. To avoid calls to {@link #addAttribute(Class)} and {@link
+   * #getAttribute(Class)}, references to all {@link AttributeImpl}s that this stream uses should be
    * retrieved during instantiation.
-   * <p>
-   * To ensure that filters and consumers know which attributes are available,
-   * the attributes must be added during instantiation. Filters and consumers
-   * are not required to check for availability of attributes in
-   * {@link #incrementToken()}.
-   * 
+   *
+   * <p>To ensure that filters and consumers know which attributes are available, the attributes
+   * must be added during instantiation. Filters and consumers are not required to check for
+   * availability of attributes in {@link #incrementToken()}.
+   *
    * @return false for end of stream; true otherwise
    */
   public abstract boolean incrementToken() throws IOException;
-  
+
   /**
-   * This method is called by the consumer after the last token has been
-   * consumed, after {@link #incrementToken()} returned <code>false</code>
-   * (using the new <code>TokenStream</code> API). Streams implementing the old API
-   * should upgrade to use this feature.
-   * <p>
-   * This method can be used to perform any end-of-stream operations, such as
-   * setting the final offset of a stream. The final offset of a stream might
-   * differ from the offset of the last token eg in case one or more whitespaces
-   * followed after the last token, but a WhitespaceTokenizer was used.
-   * <p>
-   * Additionally any skipped positions (such as those removed by a stopfilter)
-   * can be applied to the position increment, or any adjustment of other
-   * attributes where the end-of-stream value may be important.
-   * <p>
-   * If you override this method, always call {@code super.end()}.
-   * 
+   * This method is called by the consumer after the last token has been consumed, after {@link
+   * #incrementToken()} returned <code>false</code> (using the new <code>TokenStream</code> API).
+   * Streams implementing the old API should upgrade to use this feature.
+   *
+   * <p>This method can be used to perform any end-of-stream operations, such as setting the final
+   * offset of a stream. The final offset of a stream might differ from the offset of the last
+   * token, e.g. when one or more whitespace characters followed the last token and a
+   * WhitespaceTokenizer was used.
+   *
+   * <p>Additionally any skipped positions (such as those removed by a stopfilter) can be applied to
+   * the position increment, or any adjustment of other attributes where the end-of-stream value may
+   * be important.
+   *
+   * <p>If you override this method, always call {@code super.end()}.
+   *
    * @throws IOException If an I/O error occurs
    */
   public void end() throws IOException {
@@ -174,25 +162,25 @@ public abstract class TokenStream extends AttributeSource implements Closeable {
   }
 
   /**
-   * This method is called by a consumer before it begins consumption using
-   * {@link #incrementToken()}.
-   * <p>
-   * Resets this stream to a clean state. Stateful implementations must implement
-   * this method so that they can be reused, just as if they had been created fresh.
-   * <p>
-   * If you override this method, always call {@code super.reset()}, otherwise
-   * some internal state will not be correctly reset (e.g., {@link Tokenizer} will
-   * throw {@link IllegalStateException} on further usage).
+   * This method is called by a consumer before it begins consumption using {@link
+   * #incrementToken()}.
+   *
+   * <p>Resets this stream to a clean state. Stateful implementations must implement this method so
+   * that they can be reused, just as if they had been created fresh.
+   *
+   * <p>If you override this method, always call {@code super.reset()}, otherwise some internal
+   * state will not be correctly reset (e.g., {@link Tokenizer} will throw {@link
+   * IllegalStateException} on further usage).
    */
   public void reset() throws IOException {}
-  
-  /** Releases resources associated with this stream.
-   * <p>
-   * If you override this method, always call {@code super.close()}, otherwise
-   * some internal state will not be correctly reset (e.g., {@link Tokenizer} will
-   * throw {@link IllegalStateException} on reuse).
+
+  /**
+   * Releases resources associated with this stream.
+   *
+   * <p>If you override this method, always call {@code super.close()}, otherwise some internal
+   * state will not be correctly reset (e.g., {@link Tokenizer} will throw {@link
+   * IllegalStateException} on reuse).
    */
   @Override
   public void close() throws IOException {}
-  
 }
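
The numbered workflow in the class javadoc above maps onto a consumer loop like
the following sketch (the field name and text are placeholders):

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class ConsumeDemo {
      static void printTokens(Analyzer analyzer, String text) throws IOException {
        try (TokenStream ts = analyzer.tokenStream("field", text)) {
          // Step 3: grab attribute references once, before consuming.
          CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
          ts.reset();                    // step 2
          while (ts.incrementToken()) {  // step 4
            System.out.println(term);
          }
          ts.end();                      // step 5
        }                                // step 6: close() via try-with-resources
      }
    }
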
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
index 0891930..764391d 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenStreamToAutomaton.java
@@ -16,9 +16,7 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
-
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
@@ -29,13 +27,13 @@ import org.apache.lucene.util.automaton.Automaton;
 
 // TODO: maybe also toFST?  then we can translate atts into FST outputs/weights
 
-/** Consumes a TokenStream and creates an {@link Automaton}
- *  where the transition labels are UTF8 bytes (or Unicode 
- *  code points if unicodeArcs is true) from the {@link
- *  TermToBytesRefAttribute}.  Between tokens we insert
- *  POS_SEP and for holes we insert HOLE.
+/**
+ * Consumes a TokenStream and creates an {@link Automaton} where the transition labels are UTF8
+ * bytes (or Unicode code points if unicodeArcs is true) from the {@link TermToBytesRefAttribute}.
+ * Between tokens we insert POS_SEP and for holes we insert HOLE.
  *
- * @lucene.experimental */
+ * @lucene.experimental
+ */
 public class TokenStreamToAutomaton {
 
   private boolean preservePositionIncrements;
@@ -47,7 +45,9 @@ public class TokenStreamToAutomaton {
     this.preservePositionIncrements = true;
   }
 
-  /** Whether to generate holes in the automaton for missing positions, <code>true</code> by default. */
+  /**
+   * Whether to generate holes in the automaton for missing positions, <code>true</code> by default.
+   */
   public void setPreservePositionIncrements(boolean enablePositionIncrements) {
     this.preservePositionIncrements = enablePositionIncrements;
   }
@@ -57,8 +57,10 @@ public class TokenStreamToAutomaton {
     this.finalOffsetGapAsHole = finalOffsetGapAsHole;
   }
 
-  /** Whether to make transition labels Unicode code points instead of UTF8 bytes, 
-   *  <code>false</code> by default */
+  /**
+   * Whether to make transition labels Unicode code points instead of UTF8 bytes, <code>false</code>
+   * by default
+   */
   public void setUnicodeArcs(boolean unicodeArcs) {
     this.unicodeArcs = unicodeArcs;
   }
@@ -84,9 +86,10 @@ public class TokenStreamToAutomaton {
     }
   }
 
-  /** Subclass and implement this if you need to change the
-   *  token (such as escaping certain bytes) before it's
-   *  turned into a graph. */ 
+  /**
+   * Subclass and implement this if you need to change the token (such as escaping certain bytes)
+   * before it's turned into a graph.
+   */
   protected BytesRef changeToken(BytesRef in) {
     return in;
   }
@@ -97,11 +100,11 @@ public class TokenStreamToAutomaton {
   /** We add this arc to represent a hole. */
   public static final int HOLE = 0x001e;
 
-  /** Pulls the graph (including {@link
-   *  PositionLengthAttribute}) from the provided {@link
-   *  TokenStream}, and creates the corresponding
-   *  automaton where arcs are bytes (or Unicode code points 
-   *  if unicodeArcs = true) from each term. */
+  /**
+   * Pulls the graph (including {@link PositionLengthAttribute}) from the provided {@link
+   * TokenStream}, and creates the corresponding automaton where arcs are bytes (or Unicode code
+   * points if unicodeArcs = true) from each term.
+   */
   public Automaton toAutomaton(TokenStream in) throws IOException {
     final Automaton.Builder builder = new Automaton.Builder();
     builder.createState();
@@ -191,8 +194,8 @@ public class TokenStreamToAutomaton {
 
       int state = posData.leaving;
 
-      for(int byteIDX=0;byteIDX<termLen;byteIDX++) {
-        final int nextState = byteIDX == termLen-1 ? endPosData.arriving : builder.createState();
+      for (int byteIDX = 0; byteIDX < termLen; byteIDX++) {
+        final int nextState = byteIDX == termLen - 1 ? endPosData.arriving : builder.createState();
         int c;
         if (unicodeArcs) {
           c = termUnicode[byteIDX];
@@ -211,7 +214,7 @@ public class TokenStreamToAutomaton {
     int endPosInc = posIncAtt.getPositionIncrement();
     if (endPosInc == 0 && finalOffsetGapAsHole && offsetAtt.endOffset() > maxOffset) {
       endPosInc = 1;
-    } else if (endPosInc > 0 && preservePositionIncrements==false) {
+    } else if (endPosInc > 0 && preservePositionIncrements == false) {
       endPosInc = 0;
     }
 
@@ -250,7 +253,7 @@ public class TokenStreamToAutomaton {
       }
       pos++;
     }
-    
+
     return builder.finish();
   }
 
@@ -265,11 +268,12 @@ public class TokenStreamToAutomaton {
   }
   */
 
-  private static void addHoles(Automaton.Builder builder, RollingBuffer<Position> positions, int pos) {
+  private static void addHoles(
+      Automaton.Builder builder, RollingBuffer<Position> positions, int pos) {
     Position posData = positions.get(pos);
-    Position prevPosData = positions.get(pos-1);
+    Position prevPosData = positions.get(pos - 1);
 
-    while(posData.arriving == -1 || prevPosData.leaving == -1) {
+    while (posData.arriving == -1 || prevPosData.leaving == -1) {
       if (posData.arriving == -1) {
         posData.arriving = builder.createState();
         builder.addTransition(posData.arriving, posData.leaving, POS_SEP);
@@ -290,7 +294,7 @@ public class TokenStreamToAutomaton {
         break;
       }
       posData = prevPosData;
-      prevPosData = positions.get(pos-1);
+      prevPosData = positions.get(pos - 1);
     }
   }
 }
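
A short usage sketch under the defaults described above (UTF-8 byte arcs,
position increments preserved); toAutomaton() resets and fully consumes the
stream:

    import java.io.IOException;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.TokenStreamToAutomaton;
    import org.apache.lucene.util.automaton.Automaton;

    public class ToAutomatonDemo {
      static Automaton graphOf(TokenStream in) throws IOException {
        TokenStreamToAutomaton ts2a = new TokenStreamToAutomaton();
        ts2a.setUnicodeArcs(false);               // UTF-8 byte labels (the default)
        ts2a.setPreservePositionIncrements(true); // keep holes for removed positions
        return ts2a.toAutomaton(in);
      }
    }
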
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java b/lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java
index 33f972a..343e011 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java
@@ -16,31 +16,29 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.IOException;
 import java.io.Reader;
-
 import org.apache.lucene.util.AttributeFactory;
 import org.apache.lucene.util.AttributeSource;
 
-/** A Tokenizer is a TokenStream whose input is a Reader.
-  <p>
-  This is an abstract class; subclasses must override {@link #incrementToken()}
-  <p>
-  NOTE: Subclasses overriding {@link #incrementToken()} must
-  call {@link AttributeSource#clearAttributes()} before
-  setting attributes.
+/**
+ * A Tokenizer is a TokenStream whose input is a Reader.
+ *
+ * <p>This is an abstract class; subclasses must override {@link #incrementToken()}.
+ *
+ * <p>NOTE: Subclasses overriding {@link #incrementToken()} must call {@link
+ * AttributeSource#clearAttributes()} before setting attributes.
  */
-public abstract class Tokenizer extends TokenStream {  
+public abstract class Tokenizer extends TokenStream {
   /** The text source for this Tokenizer. */
   protected Reader input = ILLEGAL_STATE_READER;
-  
+
   /** Pending reader: not actually assigned to input until reset() */
   private Reader inputPending = ILLEGAL_STATE_READER;
 
   /**
-   * Construct a tokenizer with no input, awaiting a call to {@link #setReader(java.io.Reader)}
-   * to provide input.
+   * Construct a tokenizer with no input, awaiting a call to {@link #setReader(java.io.Reader)} to
+   * provide input.
    */
   protected Tokenizer() {
     //
@@ -49,6 +47,7 @@ public abstract class Tokenizer extends TokenStream {
   /**
    * Construct a tokenizer with no input, awaiting a call to {@link #setReader(java.io.Reader)} to
    * provide input.
+   *
    * @param factory attribute factory.
    */
   protected Tokenizer(AttributeFactory factory) {
@@ -57,10 +56,9 @@ public abstract class Tokenizer extends TokenStream {
 
   /**
    * {@inheritDoc}
-   * <p>
-   * <b>NOTE:</b> 
-   * The default implementation closes the input Reader, so
-   * be sure to call <code>super.close()</code> when overriding this method.
+   *
+   * <p><b>NOTE:</b> The default implementation closes the input Reader, so be sure to call <code>
+   * super.close()</code> when overriding this method.
    */
   @Override
   public void close() throws IOException {
@@ -69,20 +67,25 @@ public abstract class Tokenizer extends TokenStream {
     // GC can reclaim
     inputPending = input = ILLEGAL_STATE_READER;
   }
-  
-  /** Return the corrected offset. If {@link #input} is a {@link CharFilter} subclass
-   * this method calls {@link CharFilter#correctOffset}, else returns <code>currentOff</code>.
+
+  /**
+   * Return the corrected offset. If {@link #input} is a {@link CharFilter} subclass this method
+   * calls {@link CharFilter#correctOffset}, else returns <code>currentOff</code>.
+   *
    * @param currentOff offset as seen in the output
    * @return corrected offset based on the input
    * @see CharFilter#correctOffset
    */
   protected final int correctOffset(int currentOff) {
-    return (input instanceof CharFilter) ? ((CharFilter) input).correctOffset(currentOff) : currentOff;
+    return (input instanceof CharFilter)
+        ? ((CharFilter) input).correctOffset(currentOff)
+        : currentOff;
   }
 
-  /** Expert: Set a new reader on the Tokenizer.  Typically, an
-   *  analyzer (in its tokenStream method) will use
-   *  this to re-use a previously created tokenizer. */
+  /**
+   * Expert: Set a new reader on the Tokenizer. Typically, an analyzer (in its tokenStream method)
+   * will use this to re-use a previously created tokenizer.
+   */
   public final void setReader(Reader input) {
     if (input == null) {
       throw new NullPointerException("input must not be null");
@@ -92,7 +95,7 @@ public abstract class Tokenizer extends TokenStream {
     this.inputPending = input;
     setReaderTestPoint();
   }
-  
+
   @Override
   public void reset() throws IOException {
     super.reset();
@@ -102,17 +105,18 @@ public abstract class Tokenizer extends TokenStream {
 
   // only used for testing
   void setReaderTestPoint() {}
-  
-  private static final Reader ILLEGAL_STATE_READER = new Reader() {
-    @Override
-    public int read(char[] cbuf, int off, int len) {
-      throw new IllegalStateException("TokenStream contract violation: reset()/close() call missing, " +
-          "reset() called multiple times, or subclass does not call super.reset(). " +
-          "Please see Javadocs of TokenStream class for more information about the correct consuming workflow.");
-    }
 
-    @Override
-    public void close() {} 
-  };
-}
+  private static final Reader ILLEGAL_STATE_READER =
+      new Reader() {
+        @Override
+        public int read(char[] cbuf, int off, int len) {
+          throw new IllegalStateException(
+              "TokenStream contract violation: reset()/close() call missing, "
+                  + "reset() called multiple times, or subclass does not call super.reset(). "
+                  + "Please see Javadocs of TokenStream class for more information about the correct consuming workflow.");
+        }
 
+        @Override
+        public void close() {}
+      };
+}
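
The setReader()/ILLEGAL_STATE_READER machinery above enforces a strict reuse
cycle; a sketch of that cycle, again assuming WhitespaceTokenizer from
lucene-analyzers-common:

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class ReuseDemo {
      public static void main(String[] args) throws IOException {
        Tokenizer tok = new WhitespaceTokenizer();
        CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
        for (String text : new String[] {"first input", "second input"}) {
          tok.setReader(new StringReader(text)); // pending until reset()
          tok.reset();
          while (tok.incrementToken()) {
            System.out.println(term);
          }
          tok.end();
          tok.close(); // required before the next setReader() call
        }
      }
    }
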
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/TokenizerFactory.java b/lucene/core/src/java/org/apache/lucene/analysis/TokenizerFactory.java
index d4eb6f1..30115a1 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/TokenizerFactory.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/TokenizerFactory.java
@@ -16,15 +16,12 @@
  */
 package org.apache.lucene.analysis;
 
-
-import org.apache.lucene.util.AttributeFactory;
-
 import java.util.Map;
 import java.util.Set;
+import org.apache.lucene.util.AttributeFactory;
 
 /**
- * Abstract parent class for analysis factories that create {@link Tokenizer}
- * instances.
+ * Abstract parent class for analysis factories that create {@link Tokenizer} instances.
  *
  * @since 3.1
  */
@@ -32,17 +29,17 @@ public abstract class TokenizerFactory extends AbstractAnalysisFactory {
 
   private static final AnalysisSPILoader<TokenizerFactory> loader =
       new AnalysisSPILoader<>(TokenizerFactory.class);
-  
+
   /** looks up a tokenizer by name from context classpath */
-  public static TokenizerFactory forName(String name, Map<String,String> args) {
+  public static TokenizerFactory forName(String name, Map<String, String> args) {
     return loader.newInstance(name, args);
   }
-  
+
   /** looks up a tokenizer class by name from context classpath */
   public static Class<? extends TokenizerFactory> lookupClass(String name) {
     return loader.lookupClass(name);
   }
-  
+
   /** returns a list of all available tokenizer names from context classpath */
   public static Set<String> availableTokenizers() {
     return loader.availableServices();
@@ -57,30 +54,27 @@ public abstract class TokenizerFactory extends AbstractAnalysisFactory {
     }
   }
 
-  /** 
-   * Reloads the factory list from the given {@link ClassLoader}.
-   * Changes to the factories are visible after the method ends, all
-   * iterators ({@link #availableTokenizers()},...) stay consistent. 
-   * 
-   * <p><b>NOTE:</b> Only new factories are added, existing ones are
-   * never removed or replaced.
-   * 
-   * <p><em>This method is expensive and should only be called for discovery
-   * of new factories on the given classpath/classloader!</em>
+  /**
+   * Reloads the factory list from the given {@link ClassLoader}. Changes to the factories are
+   * visible after the method ends, all iterators ({@link #availableTokenizers()},...) stay
+   * consistent.
+   *
+   * <p><b>NOTE:</b> Only new factories are added, existing ones are never removed or replaced.
+   *
+   * <p><em>This method is expensive and should only be called for discovery of new factories on the
+   * given classpath/classloader!</em>
    */
   public static void reloadTokenizers(ClassLoader classloader) {
     loader.reload(classloader);
   }
-  
+
   /** Default ctor for compatibility with SPI */
   protected TokenizerFactory() {
     super();
   }
 
-  /**
-   * Initialize this factory via a set of key-value pairs.
-   */
-  protected TokenizerFactory(Map<String,String> args) {
+  /** Initialize this factory via a set of key-value pairs. */
+  protected TokenizerFactory(Map<String, String> args) {
     super(args);
   }
 
@@ -88,7 +82,7 @@ public abstract class TokenizerFactory extends AbstractAnalysisFactory {
   public final Tokenizer create() {
     return create(TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY);
   }
-  
+
   /** Creates a TokenStream of the specified input using the given AttributeFactory */
-  abstract public Tokenizer create(AttributeFactory factory);
+  public abstract Tokenizer create(AttributeFactory factory);
 }
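
The lookup mirrors TokenFilterFactory above; for completeness, a brief sketch
where "whitespace" again assumes lucene-analyzers-common on the classpath:

    import java.util.HashMap;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.TokenizerFactory;

    public class TokenizerLookupDemo {
      public static void main(String[] args) {
        // create() uses TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY.
        Tokenizer tok = TokenizerFactory.forName("whitespace", new HashMap<>()).create();
        System.out.println(tok.getClass().getName());
      }
    }
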
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java b/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java
index 2397e66..b8b7506 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/WordlistLoader.java
@@ -16,7 +16,6 @@
  */
 package org.apache.lucene.analysis;
 
-
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
@@ -24,27 +23,26 @@ import java.io.Reader;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.lucene.util.IOUtils;
 
 /**
  * Loader for text files that represent a list of stopwords.
- * 
+ *
  * @see IOUtils to obtain {@link Reader} instances
  * @lucene.internal
  */
 public class WordlistLoader {
-  
+
   private static final int INITIAL_CAPACITY = 16;
-  
+
   /** no instance */
   private WordlistLoader() {}
-  
+
   /**
-   * Reads lines from a Reader and adds every line as an entry to a CharArraySet (omitting
-   * leading and trailing whitespace). Every line of the Reader should contain only
-   * one word. The words need to be in lowercase if you make use of an
-   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+   * Reads lines from a Reader and adds every line as an entry to a CharArraySet (omitting leading
+   * and trailing whitespace). Every line of the Reader should contain only one word. The words need
+   * to be in lowercase if you make use of an Analyzer which uses LowerCaseFilter (like
+   * StandardAnalyzer).
    *
    * @param reader Reader containing the wordlist
    * @param result the {@link CharArraySet} to fill with the readers words
@@ -58,18 +56,17 @@ public class WordlistLoader {
       while ((word = br.readLine()) != null) {
         result.add(word.trim());
       }
-    }
-    finally {
+    } finally {
       IOUtils.close(br);
     }
     return result;
   }
-  
+
   /**
-   * Reads lines from a Reader and adds every line as an entry to a CharArraySet (omitting
-   * leading and trailing whitespace). Every line of the Reader should contain only
-   * one word. The words need to be in lowercase if you make use of an
-   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+   * Reads lines from a Reader and adds every line as an entry to a CharArraySet (omitting leading
+   * and trailing whitespace). Every line of the Reader should contain only one word. The words need
+   * to be in lowercase if you make use of an Analyzer which uses LowerCaseFilter (like
+   * StandardAnalyzer).
    *
    * @param reader Reader containing the wordlist
    * @return A {@link CharArraySet} with the reader's words
@@ -79,10 +76,10 @@ public class WordlistLoader {
   }
 
   /**
-   * Reads lines from a Reader and adds every non-comment line as an entry to a CharArraySet (omitting
-   * leading and trailing whitespace). Every line of the Reader should contain only
-   * one word. The words need to be in lowercase if you make use of an
-   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+   * Reads lines from a Reader and adds every non-comment line as an entry to a CharArraySet
+   * (omitting leading and trailing whitespace). Every line of the Reader should contain only one
+   * word. The words need to be in lowercase if you make use of an Analyzer which uses
+   * LowerCaseFilter (like StandardAnalyzer).
    *
    * @param reader Reader containing the wordlist
    * @param comment The string representing a comment.
@@ -93,44 +90,44 @@ public class WordlistLoader {
   }
 
   /**
-   * Reads lines from a Reader and adds every non-comment line as an entry to a CharArraySet (omitting
-   * leading and trailing whitespace). Every line of the Reader should contain only
-   * one word. The words need to be in lowercase if you make use of an
-   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+   * Reads lines from a Reader and adds every non-comment line as an entry to a CharArraySet
+   * (omitting leading and trailing whitespace). Every line of the Reader should contain only one
+   * word. The words need to be in lowercase if you make use of an Analyzer which uses
+   * LowerCaseFilter (like StandardAnalyzer).
    *
    * @param reader Reader containing the wordlist
    * @param comment The string representing a comment.
    * @param result the {@link CharArraySet} to fill with the readers words
    * @return the given {@link CharArraySet} with the reader's words
    */
-  public static CharArraySet getWordSet(Reader reader, String comment, CharArraySet result) throws IOException {
+  public static CharArraySet getWordSet(Reader reader, String comment, CharArraySet result)
+      throws IOException {
     BufferedReader br = null;
     try {
       br = getBufferedReader(reader);
       String word = null;
       while ((word = br.readLine()) != null) {
-        if (word.startsWith(comment) == false){
+        if (word.startsWith(comment) == false) {
           result.add(word.trim());
         }
       }
-    }
-    finally {
+    } finally {
       IOUtils.close(br);
     }
     return result;
   }
 
-  
   /**
    * Reads stopwords from a stopword list in Snowball format.
-   * <p>
-   * The snowball format is the following:
+   *
+   * <p>The snowball format is the following:
+   *
    * <ul>
-   * <li>Lines may contain multiple words separated by whitespace.
-   * <li>The comment character is the vertical line (&#124;).
-   * <li>Lines may contain trailing comments.
+   *   <li>Lines may contain multiple words separated by whitespace.
+   *   <li>The comment character is the vertical line (&#124;).
+   *   <li>Lines may contain trailing comments.
    * </ul>
-   * 
+   *
    * @param reader Reader containing a Snowball stopword list
    * @param result the {@link CharArraySet} to fill with the readers words
    * @return the given {@link CharArraySet} with the reader's words
@@ -145,25 +142,27 @@ public class WordlistLoader {
         int comment = line.indexOf('|');
         if (comment >= 0) line = line.substring(0, comment);
         String words[] = line.split("\\s+");
-        for (int i = 0; i < words.length; i++)
+        for (int i = 0; i < words.length; i++) {
           if (words[i].length() > 0) result.add(words[i]);
+        }
       }
     } finally {
       IOUtils.close(br);
     }
     return result;
   }
-  
+
   /**
    * Reads stopwords from a stopword list in Snowball format.
-   * <p>
-   * The snowball format is the following:
+   *
+   * <p>The snowball format is the following:
+   *
    * <ul>
-   * <li>Lines may contain multiple words separated by whitespace.
-   * <li>The comment character is the vertical line (&#124;).
-   * <li>Lines may contain trailing comments.
+   *   <li>Lines may contain multiple words separated by whitespace.
+   *   <li>The comment character is the vertical line (&#124;).
+   *   <li>Lines may contain trailing comments.
    * </ul>
-   * 
+   *
    * @param reader Reader containing a Snowball stopword list
    * @return A {@link CharArraySet} with the reader's words
    */
@@ -171,16 +170,18 @@ public class WordlistLoader {
     return getSnowballWordSet(reader, new CharArraySet(INITIAL_CAPACITY, false));
   }
 
-
   /**
    * Reads a stem dictionary. Each line contains:
+   *
    * <pre>word<b>\t</b>stem</pre>
+   *
    * (i.e. two tab separated words)
    *
    * @return stem dictionary that overrules the stemming algorithm
    * @throws IOException If there is a low-level I/O error.
    */
-  public static CharArrayMap<String> getStemDict(Reader reader, CharArrayMap<String> result) throws IOException {
+  public static CharArrayMap<String> getStemDict(Reader reader, CharArrayMap<String> result)
+      throws IOException {
     BufferedReader br = null;
     try {
       br = getBufferedReader(reader);
@@ -194,19 +195,17 @@ public class WordlistLoader {
     }
     return result;
   }
-  
+
   /**
-   * Accesses a resource by name and returns the (non comment) lines containing
-   * data using the given character encoding.
+   * Accesses a resource by name and returns the (non-comment) lines containing data using the given
+   * character encoding.
    *
-   * <p>
-   * A comment line is any line that starts with the character "#"
-   * </p>
+   * <p>A comment line is any line that starts with the character "#"
    *
    * @return a list of non-blank non-comment lines with whitespace trimmed
    * @throws IOException If there is a low-level I/O error.
    */
-  public static List<String> getLines(InputStream stream, Charset charset) throws IOException{
+  public static List<String> getLines(InputStream stream, Charset charset) throws IOException {
     BufferedReader input = null;
     ArrayList<String> lines;
     boolean success = false;
@@ -214,15 +213,15 @@ public class WordlistLoader {
       input = getBufferedReader(IOUtils.getDecodingReader(stream, charset));
 
       lines = new ArrayList<>();
-      for (String word=null; (word=input.readLine())!=null;) {
+      for (String word = null; (word = input.readLine()) != null; ) {
         // skip initial bom marker
         if (lines.isEmpty() && word.length() > 0 && word.charAt(0) == '\uFEFF')
           word = word.substring(1);
         // skip comments
         if (word.startsWith("#")) continue;
-        word=word.trim();
+        word = word.trim();
         // skip blank lines
-        if (word.length()==0) continue;
+        if (word.length() == 0) continue;
         lines.add(word);
       }
       success = true;
@@ -235,10 +234,10 @@ public class WordlistLoader {
       }
     }
   }
-  
+
   private static BufferedReader getBufferedReader(Reader reader) {
-    return (reader instanceof BufferedReader) ? (BufferedReader) reader
+    return (reader instanceof BufferedReader)
+        ? (BufferedReader) reader
         : new BufferedReader(reader);
   }
-  
 }
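
A sketch of loading a Snowball-format list with the loader above; the path
"stop_en.txt" is a hypothetical example, and note the class is marked
@lucene.internal:

    import java.io.IOException;
    import java.io.Reader;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.WordlistLoader;

    public class SnowballDemo {
      static CharArraySet loadStops() throws IOException {
        // getSnowballWordSet() handles "|" comments and multi-word lines.
        try (Reader r = Files.newBufferedReader(Paths.get("stop_en.txt"), StandardCharsets.UTF_8)) {
          return WordlistLoader.getSnowballWordSet(r);
        }
      }
    }
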
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/package-info.java b/lucene/core/src/java/org/apache/lucene/analysis/package-info.java
index b847d16..4c3270d 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/package-info.java
@@ -16,206 +16,183 @@
  */
 
 /**
- * Text analysis. 
- * <p>API and code to convert text into indexable/searchable tokens.  Covers {@link org.apache.lucene.analysis.Analyzer} and related classes.</p>
+ * Text analysis.
+ *
+ * <p>API and code to convert text into indexable/searchable tokens. Covers {@link
+ * org.apache.lucene.analysis.Analyzer} and related classes.
+ *
  * <h2>Parsing? Tokenization? Analysis!</h2>
- * <p>
- * Lucene, an indexing and search library, accepts only plain text input.
+ *
+ * <p>Lucene, an indexing and search library, accepts only plain text input.
+ *
  * <h2>Parsing</h2>
- * <p>
- * Applications that build their search capabilities upon Lucene may support documents in various formats &ndash; HTML, XML, PDF, Word &ndash; just to name a few.
- * Lucene does not care about the <i>Parsing</i> of these and other document formats, and it is the responsibility of the 
- * application using Lucene to use an appropriate <i>Parser</i> to convert the original format into plain text before passing that plain text to Lucene.
+ *
+ * <p>Applications that build their search capabilities upon Lucene may support documents in various
+ * formats &ndash; HTML, XML, PDF, Word &ndash; just to name a few. Lucene does not care about the
+ * <i>Parsing</i> of these and other document formats, and it is the responsibility of the
+ * application using Lucene to use an appropriate <i>Parser</i> to convert the original format into
+ * plain text before passing that plain text to Lucene.
+ *
  * <h2>Tokenization</h2>
- * <p>
- * Plain text passed to Lucene for indexing goes through a process generally called tokenization. Tokenization is the process
- * of breaking input text into small indexing elements &ndash; tokens.
- * The way input text is broken into tokens heavily influences how people will then be able to search for that text. 
- * For instance, sentences beginnings and endings can be identified to provide for more accurate phrase 
- * and proximity searches (though sentence identification is not provided by Lucene).
- * <p>
- *   In some cases simply breaking the input text into tokens is not enough
- *   &ndash; a deeper <i>Analysis</i> may be needed. Lucene includes both
- *   pre- and post-tokenization analysis facilities.
- * </p>
- * <p>
- *   Pre-tokenization analysis can include (but is not limited to) stripping
- *   HTML markup, and transforming or removing text matching arbitrary patterns
- *   or sets of fixed strings.
- * </p>
- * <p>
- *   There are many post-tokenization steps that can be done, including 
- *   (but not limited to):
- * </p>
+ *
+ * <p>Plain text passed to Lucene for indexing goes through a process generally called tokenization.
+ * Tokenization is the process of breaking input text into small indexing elements &ndash; tokens.
+ * The way input text is broken into tokens heavily influences how people will then be able to
+ * search for that text. For instance, sentence beginnings and endings can be identified to provide
+ * for more accurate phrase and proximity searches (though sentence identification is not provided
+ * by Lucene).
+ *
+ * <p>In some cases simply breaking the input text into tokens is not enough &ndash; a deeper
+ * <i>Analysis</i> may be needed. Lucene includes both pre- and post-tokenization analysis
+ * facilities.
+ *
+ * <p>Pre-tokenization analysis can include (but is not limited to) stripping HTML markup, and
+ * transforming or removing text matching arbitrary patterns or sets of fixed strings.
+ *
+ * <p>There are many post-tokenization steps that can be done, including (but not limited to):
+ *
  * <ul>
- *   <li><a href="http://en.wikipedia.org/wiki/Stemming">Stemming</a> &ndash; 
- *       Replacing words with their stems. 
- *       For instance with English stemming "bikes" is replaced with "bike"; 
- *       now query "bike" can find both documents containing "bike" and those containing "bikes".
- *   </li>
- *   <li><a href="http://en.wikipedia.org/wiki/Stop_words">Stop Words Filtering</a> &ndash; 
- *       Common words like "the", "and" and "a" rarely add any value to a search.
- *       Removing them shrinks the index size and increases performance.
- *       It may also reduce some "noise" and actually improve search quality.
- *   </li>
- *   <li><a href="http://en.wikipedia.org/wiki/Text_normalization">Text Normalization</a> &ndash; 
+ *   <li><a href="http://en.wikipedia.org/wiki/Stemming">Stemming</a> &ndash; Replacing words with
+ *       their stems. For instance with English stemming "bikes" is replaced with "bike"; now query
+ *       "bike" can find both documents containing "bike" and those containing "bikes".
+ *   <li><a href="http://en.wikipedia.org/wiki/Stop_words">Stop Words Filtering</a> &ndash; Common
+ *       words like "the", "and" and "a" rarely add any value to a search. Removing them shrinks the
+ *       index size and increases performance. It may also reduce some "noise" and actually improve
+ *       search quality.
+ *   <li><a href="http://en.wikipedia.org/wiki/Text_normalization">Text Normalization</a> &ndash;
  *       Stripping accents and other character markings can make for better searching.
- *   </li>
- *   <li><a href="http://en.wikipedia.org/wiki/Synonym">Synonym Expansion</a> &ndash; 
- *       Adding in synonyms at the same token position as the current word can mean better 
- *       matching when users search with words in the synonym set.
- *   </li>
+ *   <li><a href="http://en.wikipedia.org/wiki/Synonym">Synonym Expansion</a> &ndash; Adding in
+ *       synonyms at the same token position as the current word can mean better matching when users
+ *       search with words in the synonym set.
  * </ul>
+ *
  * <h2>Core Analysis</h2>
- * <p>
- *   The analysis package provides the mechanism to convert Strings and Readers
- *   into tokens that can be indexed by Lucene.  There are four main classes in 
- *   the package from which all analysis processes are derived.  These are:
- * </p>
+ *
+ * <p>The analysis package provides the mechanism to convert Strings and Readers into tokens that
+ * can be indexed by Lucene. There are four main classes in the package from which all analysis
+ * processes are derived. These are:
+ *
  * <ul>
- *   <li>
- *     {@link org.apache.lucene.analysis.Analyzer} &ndash; An <code>Analyzer</code> is 
- *     responsible for supplying a
- *     {@link org.apache.lucene.analysis.TokenStream} which can be consumed
- *     by the indexing and searching processes.  See below for more information
- *     on implementing your own {@link org.apache.lucene.analysis.Analyzer}. Most of the time, you can use
- *     an anonymous subclass of {@link org.apache.lucene.analysis.Analyzer}.
- *   </li>
- *   <li>
- *     {@link org.apache.lucene.analysis.CharFilter} &ndash; <code>CharFilter</code> extends
- *     {@link java.io.Reader} to transform the text before it is
- *     tokenized, while providing
- *     corrected character offsets to account for these modifications.  This
- *     capability allows highlighting to function over the original text when 
- *     indexed tokens are created from <code>CharFilter</code>-modified text with offsets
- *     that are not the same as those in the original text. {@link org.apache.lucene.analysis.Tokenizer#setReader(java.io.Reader)}
- *     accept <code>CharFilter</code>s.  <code>CharFilter</code>s may
- *     be chained to perform multiple pre-tokenization modifications.
- *   </li>
- *   <li>
- *     {@link org.apache.lucene.analysis.Tokenizer} &ndash; A <code>Tokenizer</code> is a 
- *     {@link org.apache.lucene.analysis.TokenStream} and is responsible for
- *     breaking up incoming text into tokens. In many cases, an {@link org.apache.lucene.analysis.Analyzer} will
- *     use a {@link org.apache.lucene.analysis.Tokenizer} as the first step in the analysis process.  However,
- *     to modify text prior to tokenization, use a {@link org.apache.lucene.analysis.CharFilter} subclass (see
- *     above).
- *   </li>
- *   <li>
- *     {@link org.apache.lucene.analysis.TokenFilter} &ndash; A <code>TokenFilter</code> is
- *     a {@link org.apache.lucene.analysis.TokenStream} and is responsible
- *     for modifying tokens that have been created by the <code>Tokenizer</code>. Common 
- *     modifications performed by a <code>TokenFilter</code> are: deletion, stemming, synonym 
- *     injection, and case folding.  Not all <code>Analyzer</code>s require <code>TokenFilter</code>s.
- *   </li>
+ *   <li>{@link org.apache.lucene.analysis.Analyzer} &ndash; An <code>Analyzer</code> is responsible
+ *       for supplying a {@link org.apache.lucene.analysis.TokenStream} which can be consumed by the
+ *       indexing and searching processes. See below for more information on implementing your own
+ *       {@link org.apache.lucene.analysis.Analyzer}. Most of the time, you can use an anonymous
+ *       subclass of {@link org.apache.lucene.analysis.Analyzer}.
+ *   <li>{@link org.apache.lucene.analysis.CharFilter} &ndash; <code>CharFilter</code> extends
+ *       {@link java.io.Reader} to transform the text before it is tokenized, while providing
+ *       corrected character offsets to account for these modifications. This capability allows
+ *       highlighting to function over the original text when indexed tokens are created from <code>
+ *       CharFilter</code>-modified text with offsets that are not the same as those in the original
+ *       text. {@link org.apache.lucene.analysis.Tokenizer#setReader(java.io.Reader)} accepts <code>
+ *       CharFilter</code>s. <code>CharFilter</code>s may be chained to perform multiple
+ *       pre-tokenization modifications (see the sketch after this list).
+ *   <li>{@link org.apache.lucene.analysis.Tokenizer} &ndash; A <code>Tokenizer</code> is a {@link
+ *       org.apache.lucene.analysis.TokenStream} and is responsible for breaking up incoming text
+ *       into tokens. In many cases, an {@link org.apache.lucene.analysis.Analyzer} will use a
+ *       {@link org.apache.lucene.analysis.Tokenizer} as the first step in the analysis process.
+ *       However, to modify text prior to tokenization, use a {@link
+ *       org.apache.lucene.analysis.CharFilter} subclass (see above).
+ *   <li>{@link org.apache.lucene.analysis.TokenFilter} &ndash; A <code>TokenFilter</code> is a
+ *       {@link org.apache.lucene.analysis.TokenStream} and is responsible for modifying tokens that
+ *       have been created by the <code>Tokenizer</code>. Common modifications performed by a <code>
+ *       TokenFilter</code> are: deletion, stemming, synonym injection, and case folding. Not all
+ *       <code>Analyzer</code>s require <code>TokenFilter</code>s.
  * </ul>
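+ *
+ * <p>As a concrete (if minimal) sketch of how these classes cooperate &ndash; assuming the
+ * analysis-common module's <code>HTMLStripCharFilter</code> and <code>WhitespaceTokenizer</code>
+ * &ndash; an analyzer can strip markup before tokenization by overriding <code>initReader()</code>:
+ *
+ * <pre class="prettyprint">
+ *   Analyzer htmlAwareAnalyzer = new Analyzer() {
+ *     {@literal @Override}
+ *     protected TokenStreamComponents createComponents(String fieldName) {
+ *       return new TokenStreamComponents(new WhitespaceTokenizer());
+ *     }
+ *
+ *     {@literal @Override}
+ *     protected Reader initReader(String fieldName, Reader reader) {
+ *       return new HTMLStripCharFilter(reader); // a CharFilter; offsets are corrected
+ *     }
+ *   };
+ * </pre>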
+ *
  * <h2>Hints, Tips and Traps</h2>
- * <p>
- *   The relationship between {@link org.apache.lucene.analysis.Analyzer} and 
- *   {@link org.apache.lucene.analysis.CharFilter}s,
- *   {@link org.apache.lucene.analysis.Tokenizer}s,
- *   and {@link org.apache.lucene.analysis.TokenFilter}s is sometimes confusing. To ease
- *   this confusion, here is some clarifications:
- * </p>
+ *
+ * <p>The relationship between {@link org.apache.lucene.analysis.Analyzer} and {@link
+ * org.apache.lucene.analysis.CharFilter}s, {@link org.apache.lucene.analysis.Tokenizer}s, and
+ * {@link org.apache.lucene.analysis.TokenFilter}s is sometimes confusing. To ease this confusion,
+ * here are some clarifications:
+ *
  * <ul>
- *   <li>
- *     The {@link org.apache.lucene.analysis.Analyzer} is a
- *     <strong>factory</strong> for analysis chains. <code>Analyzer</code>s don't
- *     process text, <code>Analyzer</code>s construct <code>CharFilter</code>s, <code>Tokenizer</code>s, and/or
- *     <code>TokenFilter</code>s that process text. An <code>Analyzer</code> has two tasks: 
- *     to produce {@link org.apache.lucene.analysis.TokenStream}s that accept a
- *     reader and produces tokens, and to wrap or otherwise
- *     pre-process {@link java.io.Reader} objects.
- *   </li>
- *   <li>
- *   The {@link org.apache.lucene.analysis.CharFilter} is a subclass of
- *  {@link java.io.Reader} that supports offset tracking.
- *   </li>
- *   <li>The{@link org.apache.lucene.analysis.Tokenizer}
- *     is only responsible for <u>breaking</u> the input text into tokens.
- *   </li>
- *   <li>The{@link org.apache.lucene.analysis.TokenFilter} modifies a
- *   stream of tokens and their contents.
- *   </li>
- *   <li>
- *     {@link org.apache.lucene.analysis.Tokenizer} is a {@link org.apache.lucene.analysis.TokenStream}, 
- *     but {@link org.apache.lucene.analysis.Analyzer} is not.
- *   </li>
- *   <li>
- *     {@link org.apache.lucene.analysis.Analyzer} is "field aware", but 
- *     {@link org.apache.lucene.analysis.Tokenizer} is not. {@link org.apache.lucene.analysis.Analyzer}s may
- *     take a field name into account when constructing the {@link org.apache.lucene.analysis.TokenStream}.
- *   </li>
+ *   <li>The {@link org.apache.lucene.analysis.Analyzer} is a <strong>factory</strong> for analysis
+ *       chains. <code>Analyzer</code>s don't process text, <code>Analyzer</code>s construct <code>
+ *       CharFilter</code>s, <code>Tokenizer</code>s, and/or <code>TokenFilter</code>s that process
+ *       text. An <code>Analyzer</code> has two tasks: to produce {@link
+ *       org.apache.lucene.analysis.TokenStream}s that accept a reader and produce tokens, and to
+ *       wrap or otherwise pre-process {@link java.io.Reader} objects.
+ *   <li>The {@link org.apache.lucene.analysis.CharFilter} is a subclass of {@link java.io.Reader}
+ *       that supports offset tracking.
+ *   <li>The {@link org.apache.lucene.analysis.Tokenizer} is only responsible for <u>breaking</u>
+ *       the input text into tokens.
+ *   <li>The {@link org.apache.lucene.analysis.TokenFilter} modifies a stream of tokens and their
+ *       contents.
+ *   <li>{@link org.apache.lucene.analysis.Tokenizer} is a {@link
+ *       org.apache.lucene.analysis.TokenStream}, but {@link org.apache.lucene.analysis.Analyzer} is
+ *       not.
+ *   <li>{@link org.apache.lucene.analysis.Analyzer} is "field aware", but {@link
+ *       org.apache.lucene.analysis.Tokenizer} is not. {@link org.apache.lucene.analysis.Analyzer}s
+ *       may take a field name into account when constructing the {@link
+ *       org.apache.lucene.analysis.TokenStream}.
  * </ul>
- * <p>
- *   If you want to use a particular combination of <code>CharFilter</code>s, a
- *   <code>Tokenizer</code>, and some <code>TokenFilter</code>s, the simplest thing is often an
- *   create an anonymous subclass of {@link org.apache.lucene.analysis.Analyzer}, provide {@link
- *   org.apache.lucene.analysis.Analyzer#createComponents(String)} and perhaps also
- *   {@link org.apache.lucene.analysis.Analyzer#initReader(String,
- *   java.io.Reader)}. However, if you need the same set of components
- *   over and over in many places, you can make a subclass of
- *   {@link org.apache.lucene.analysis.Analyzer}. In fact, Apache Lucene
- *   supplies a large family of <code>Analyzer</code> classes that deliver useful
- *   analysis chains. The most common of these is the <a href="{@docRoot}/org/apache/lucene/analysis/standard/StandardAnalyzer.html">StandardAnalyzer</a>.
- *   Many applications will have a long and industrious life with nothing more
- *   than the <code>StandardAnalyzer</code>. The <a href="{@docRoot}/../analysis/common/overview-summary.html">analysis-common</a>
- *   library provides many pre-existing analyzers for various languages.
- *   The analysis-common library also allows to configure a custom Analyzer without subclassing using the
- *   <a href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/custom/CustomAnalyzer.html">CustomAnalyzer</a>
- *   class.
- * </p>
- * <p>
- *   Aside from the <code>StandardAnalyzer</code>,
- *   Lucene includes several components containing analysis components,
- *   all under the 'analysis' directory of the distribution. Some of
- *   these support particular languages, others integrate external
- *   components. The 'common' subdirectory has some noteworthy
- *  general-purpose analyzers, including the <a href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.html">PerFieldAnalyzerWrapper</a>. Most <code>Analyzer</code>s perform the same operation on all
- *  {@link org.apache.lucene.document.Field}s.  The PerFieldAnalyzerWrapper can be used to associate a different <code>Analyzer</code> with different
- *  {@link org.apache.lucene.document.Field}s. There is a great deal of
- *  functionality in the analysis area, you should study it carefully to
- *  find the pieces you need.
- * </p>
- * <p>
- *   Analysis is one of the main causes of slow indexing.  Simply put, the more you analyze the slower the indexing (in most cases).
- *   Perhaps your application would be just fine using the simple WhitespaceTokenizer combined with a StopFilter. The benchmark/ library can be useful 
- *   for testing out the speed of the analysis process.
- * </p>
+ *
+ * <p>If you want to use a particular combination of <code>CharFilter</code>s, a <code>Tokenizer
+ * </code>, and some <code>TokenFilter</code>s, the simplest thing is often to create an anonymous
+ * subclass of {@link org.apache.lucene.analysis.Analyzer}, provide {@link
+ * org.apache.lucene.analysis.Analyzer#createComponents(String)} and perhaps also {@link
+ * org.apache.lucene.analysis.Analyzer#initReader(String, java.io.Reader)}. However, if you need the
+ * same set of components over and over in many places, you can make a subclass of {@link
+ * org.apache.lucene.analysis.Analyzer}. In fact, Apache Lucene supplies a large family of <code>
+ * Analyzer</code> classes that deliver useful analysis chains. The most common of these is the <a
+ * href="{@docRoot}/org/apache/lucene/analysis/standard/StandardAnalyzer.html">StandardAnalyzer</a>.
+ * Many applications will have a long and industrious life with nothing more than the <code>
+ * StandardAnalyzer</code>. The <a
+ * href="{@docRoot}/../analysis/common/overview-summary.html">analysis-common</a> library provides
+ * many pre-existing analyzers for various languages. The analysis-common library also lets you
+ * configure a custom Analyzer without subclassing, using the <a
+ * href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/custom/CustomAnalyzer.html">CustomAnalyzer</a>
+ * class.
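+ *
+ * <p>A minimal sketch of such an anonymous subclass (the chain shown &ndash; core's
+ * <code>StandardTokenizer</code> followed by <code>LowerCaseFilter</code> &ndash; is illustrative,
+ * not prescriptive):
+ *
+ * <pre class="prettyprint">
+ *   Analyzer analyzer = new Analyzer() {
+ *     {@literal @Override}
+ *     protected TokenStreamComponents createComponents(String fieldName) {
+ *       Tokenizer source = new StandardTokenizer();
+ *       TokenStream result = new LowerCaseFilter(source);
+ *       return new TokenStreamComponents(source, result);
+ *     }
+ *   };
+ * </pre>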
+ *
+ * <p>Aside from the <code>StandardAnalyzer</code>, Lucene includes several modules containing
+ * analysis components, all under the 'analysis' directory of the distribution. Some of these
+ * support particular languages, others integrate external components. The 'common' subdirectory has
+ * some noteworthy general-purpose analyzers, including the <a
+ * href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.html">PerFieldAnalyzerWrapper</a>.
+ * Most <code>Analyzer</code>s perform the same operation on all {@link
+ * org.apache.lucene.document.Field}s. The PerFieldAnalyzerWrapper can be used to associate a
+ * different <code>Analyzer</code> with different {@link org.apache.lucene.document.Field}s. There
+ * is a great deal of functionality in the analysis area; study it carefully to find the pieces you
+ * need.
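+ *
+ * <p>A quick sketch of the wrapper (field names here are hypothetical):
+ *
+ * <pre class="prettyprint">
+ *   Map&lt;String, Analyzer&gt; perField = new HashMap&lt;&gt;();
+ *   perField.put("firstname", new KeywordAnalyzer()); // do not tokenize names
+ *   // "firstname" gets the KeywordAnalyzer, every other field the StandardAnalyzer:
+ *   Analyzer wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), perField);
+ * </pre>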
+ *
+ * <p>Analysis is one of the main causes of slow indexing. Simply put, the more you analyze, the
+ * slower the indexing (in most cases). Perhaps your application would be just fine using the simple
+ * WhitespaceTokenizer combined with a StopFilter. The benchmark/ library can be useful for testing
+ * out the speed of the analysis process.
+ *
  * <h2>Invoking the Analyzer</h2>
- * <p>
- *   Applications usually do not invoke analysis &ndash; Lucene does it
- *  for them. Applications construct <code>Analyzer</code>s and pass then into Lucene,
- *  as follows:
- * </p>
+ *
+ * <p>Applications usually do not invoke analysis &ndash; Lucene does it for them. Applications
+ * construct <code>Analyzer</code>s and pass them into Lucene, as follows:
+ *
  * <ul>
- *   <li>
- *     At indexing, as a consequence of 
- *     {@link org.apache.lucene.index.IndexWriter#addDocument(Iterable) addDocument(doc)},
- *     the <code>Analyzer</code> in effect for indexing is invoked for each indexed field of the added document.
- *   </li>
- *   <li>
- *     At search, a <code>QueryParser</code> may invoke the Analyzer during parsing.  Note that for some queries, analysis does not
- *     take place, e.g. wildcard queries.
- *   </li>
+ *   <li>At indexing, as a consequence of {@link
+ *       org.apache.lucene.index.IndexWriter#addDocument(Iterable) addDocument(doc)}, the <code>
+ *       Analyzer</code> in effect for indexing is invoked for each indexed field of the added
+ *       document.
+ *   <li>At search, a <code>QueryParser</code> may invoke the Analyzer during parsing. Note that for
+ *       some queries, analysis does not take place, e.g. wildcard queries.
  * </ul>
- * <p>
- *   However an application might invoke Analysis of any text for testing or for any other purpose, something like:
- * </p>
- * <PRE class="prettyprint" id="analysis-workflow">
+ *
+ * <p>However, an application might invoke analysis of any text for testing or for any other
+ * purpose, for example:
+ *
+ * <pre class="prettyprint" id="analysis-workflow">
 *     Analyzer analyzer = new StandardAnalyzer(); // or any other analyzer
  *     TokenStream ts = analyzer.tokenStream("myfield", new StringReader("some text goes here"));
  *     // The Analyzer class will construct the Tokenizer, TokenFilter(s), and CharFilter(s),
  *     //   and pass the resulting Reader to the Tokenizer.
  *     OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
- *     
+ *
  *     try {
  *       ts.reset(); // Resets this stream to the beginning. (Required)
  *       while (ts.incrementToken()) {
  *         // Use {@link org.apache.lucene.util.AttributeSource#reflectAsString(boolean)}
  *         // for token stream debugging.
  *         System.out.println("token: " + ts.reflectAsString(true));
- * 
+ *
  *         System.out.println("token start offset: " + offsetAtt.startOffset());
  *         System.out.println("  token end offset: " + offsetAtt.endOffset());
  *       }
@@ -223,120 +200,117 @@
  *     } finally {
  *       ts.close(); // Release resources associated with this stream.
  *     }
- * </PRE>
+ * </pre>
+ *
  * <h2>Indexing Analysis vs. Search Analysis</h2>
- * <p>
- *   Selecting the "correct" analyzer is crucial
- *   for search quality, and can also affect indexing and search performance.
- *   The "correct" analyzer for your application will depend on what your input text
- *   looks like and what problem you are trying to solve.
- *   Lucene java's wiki page 
- *   <a href="http://wiki.apache.org/lucene-java/AnalysisParalysis">AnalysisParalysis</a> 
- *   provides some data on "analyzing your analyzer".
- *   Here are some rules of thumb:
- *   <ol>
- *     <li>Test test test... (did we say test?)</li>
- *     <li>Beware of too much analysis &ndash; it might hurt indexing performance.</li>
- *     <li>Start with the same analyzer for indexing and search, otherwise searches would not find what they are supposed to...</li>
- *     <li>In some cases a different analyzer is required for indexing and search, for instance:
- *         <ul>
- *            <li>Certain searches require more stop words to be filtered. (i.e. more than those that were filtered at indexing.)</li>
- *            <li>Query expansion by synonyms, acronyms, auto spell correction, etc.</li>
- *         </ul>
- *         This might sometimes require a modified analyzer &ndash; see the next section on how to do that.
- *     </li>
- *   </ol>
+ *
+ * <p>Selecting the "correct" analyzer is crucial for search quality, and can also affect indexing
+ * and search performance. The "correct" analyzer for your application will depend on what your
+ * input text looks like and what problem you are trying to solve. Lucene java's wiki page <a
+ * href="http://wiki.apache.org/lucene-java/AnalysisParalysis">AnalysisParalysis</a> provides some
+ * data on "analyzing your analyzer". Here are some rules of thumb:
+ *
+ * <ol>
+ *   <li>Test test test... (did we say test?)
+ *   <li>Beware of too much analysis &ndash; it might hurt indexing performance.
+ *   <li>Start with the same analyzer for indexing and search, otherwise searches would not find
+ *       what they are supposed to...
+ *   <li>In some cases a different analyzer is required for indexing and search, for instance:
+ *       <ul>
+ *         <li>Certain searches require more stop words to be filtered. (i.e. more than those that
+ *             were filtered at indexing.)
+ *         <li>Query expansion by synonyms, acronyms, auto spell correction, etc.
+ *       </ul>
+ *       This might sometimes require a modified analyzer &ndash; see the next section on how to do
+ *       that, and the sketch after this list.
+ * </ol>
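+ *
+ * <p>Mechanically, using different analyzers at indexing and search time is just a matter of
+ * where each one is handed over; a sketch (assuming the queryparser module's classic
+ * <code>QueryParser</code>):
+ *
+ * <pre class="prettyprint">
+ *   Analyzer indexAnalyzer = ...;  // e.g. without synonyms
+ *   Analyzer searchAnalyzer = ...; // e.g. with SynonymGraphFilter added
+ *
+ *   IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(indexAnalyzer));
+ *   ...
+ *   QueryParser parser = new QueryParser("content", searchAnalyzer);
+ *   Query query = parser.parse("blue sky");
+ * </pre>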
+ *
  * <h2>Implementing your own Analyzer and Analysis Components</h2>
- * <p>
- *   Creating your own Analyzer is straightforward. Your Analyzer should subclass {@link org.apache.lucene.analysis.Analyzer}. It can use
- *   existing analysis components &mdash; CharFilter(s) <i>(optional)</i>, a
- *   Tokenizer, and TokenFilter(s) <i>(optional)</i> &mdash; or components you
- *   create, or a combination of existing and newly created components.  Before
- *   pursuing this approach, you may find it worthwhile to explore the
- *   <a href="{@docRoot}/../analysis/common/overview-summary.html">analysis-common</a> library and/or ask on the
- *   <a href="http://lucene.apache.org/core/discussion.html">java-user@lucene.apache.org mailing list</a> first to see if what you
- *   need already exists. If you are still committed to creating your own
- *   Analyzer, have a look at the source code of any one of the many samples
- *   located in this package.
- * </p>
- * <p>
- *   The following sections discuss some aspects of implementing your own analyzer.
- * </p>
+ *
+ * <p>Creating your own Analyzer is straightforward. Your Analyzer should subclass {@link
+ * org.apache.lucene.analysis.Analyzer}. It can use existing analysis components &mdash;
+ * CharFilter(s) <i>(optional)</i>, a Tokenizer, and TokenFilter(s) <i>(optional)</i> &mdash; or
+ * components you create, or a combination of existing and newly created components. Before pursuing
+ * this approach, you may find it worthwhile to explore the <a
+ * href="{@docRoot}/../analysis/common/overview-summary.html">analysis-common</a> library and/or ask
+ * on the <a href="http://lucene.apache.org/core/discussion.html">java-user@lucene.apache.org
+ * mailing list</a> first to see if what you need already exists. If you are still committed to
+ * creating your own Analyzer, have a look at the source code of any one of the many samples located
+ * in this package.
+ *
+ * <p>The following sections discuss some aspects of implementing your own analyzer.
+ *
  * <h3>Field Section Boundaries</h3>
- * <p>
- *   When {@link org.apache.lucene.document.Document#add(org.apache.lucene.index.IndexableField) document.add(field)}
- *   is called multiple times for the same field name, we could say that each such call creates a new 
- *   section for that field in that document. 
- *   In fact, a separate call to 
- *   {@link org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader) tokenStream(field,reader)}
- *   would take place for each of these so called "sections".
- *   However, the default Analyzer behavior is to treat all these sections as one large section. 
- *   This allows phrase search and proximity search to seamlessly cross 
- *   boundaries between these "sections".
- *   In other words, if a certain field "f" is added like this:
- * </p>
+ *
+ * <p>When {@link org.apache.lucene.document.Document#add(org.apache.lucene.index.IndexableField)
+ * document.add(field)} is called multiple times for the same field name, we could say that each
+ * such call creates a new section for that field in that document. In fact, a separate call to
+ * {@link org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader)
+ * tokenStream(field,reader)} would take place for each of these so-called "sections". However, the
+ * default Analyzer behavior is to treat all these sections as one large section. This allows phrase
+ * search and proximity search to seamlessly cross boundaries between these "sections". In other
+ * words, if a certain field "f" is added like this:
+ *
  * <PRE class="prettyprint">
  *     document.add(new Field("f","first ends",...);
  *     document.add(new Field("f","starts two",...);
  *     indexWriter.addDocument(document);
  * </PRE>
- * <p>
- *   Then, a phrase search for "ends starts" would find that document.
- *   Where desired, this behavior can be modified by introducing a "position gap" between consecutive field "sections", 
- *   simply by overriding 
- *   {@link org.apache.lucene.analysis.Analyzer#getPositionIncrementGap(java.lang.String) Analyzer.getPositionIncrementGap(fieldName)}:
- * </p>
- * <PRE class="prettyprint">
+ *
+ * <p>Then, a phrase search for "ends starts" would find that document. Where desired, this behavior
+ * can be modified by introducing a "position gap" between consecutive field "sections", simply by
+ * overriding {@link org.apache.lucene.analysis.Analyzer#getPositionIncrementGap(java.lang.String)
+ * Analyzer.getPositionIncrementGap(fieldName)}:
+ *
+ * <pre class="prettyprint">
 *   // StandardAnalyzer is final, so put the override on a custom Analyzer subclass
 *   Analyzer myAnalyzer = new Analyzer() {
 *     {@literal @Override}
 *     protected TokenStreamComponents createComponents(String fieldName) {
 *       return new TokenStreamComponents(new StandardTokenizer());
 *     }
 *
 *     {@literal @Override}
 *     public int getPositionIncrementGap(String fieldName) {
 *       return 10;
 *     }
 *   };
- * </PRE>
+ * </pre>
+ *
  * <h3>End of Input Cleanup</h3>
- * <p>
- *    At the ends of each field, Lucene will call the {@link org.apache.lucene.analysis.TokenStream#end()}.
- *    The components of the token stream (the tokenizer and the token filters) <strong>must</strong>
- *    put accurate values into the token attributes to reflect the situation at the end of the field.
- *    The Offset attribute must contain the final offset (the total number of characters processed)
- *    in both start and end. Attributes like PositionLength must be correct. 
- * </p>
- * <p>
- *    The base method{@link org.apache.lucene.analysis.TokenStream#end()} sets PositionIncrement to 0, which is required.
- *    Other components must override this method to fix up the other attributes.
- * </p>
+ *
+ * <p>At the end of each field, Lucene will call {@link
+ * org.apache.lucene.analysis.TokenStream#end()}. The components of the token stream (the tokenizer
+ * and the token filters) <strong>must</strong> put accurate values into the token attributes to
+ * reflect the situation at the end of the field. The Offset attribute must contain the final offset
+ * (the total number of characters processed) in both start and end. Attributes like PositionLength
+ * must be correct.
+ *
+ * <p>The base method {@link org.apache.lucene.analysis.TokenStream#end()} sets PositionIncrement to
+ * 0, which is required. Other components must override this method to fix up the other attributes.
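+ *
+ * <p>A minimal sketch of a typical <code>end()</code> override (it assumes the component tracks a
+ * <code>finalOffset</code> and holds an <code>offsetAtt</code> reference):
+ *
+ * <pre class="prettyprint">
+ *   {@literal @Override}
+ *   public void end() throws IOException {
+ *     super.end(); // sets PositionIncrement to 0
+ *     // both start and end are set to the final offset (total characters processed)
+ *     offsetAtt.setOffset(finalOffset, finalOffset);
+ *   }
+ * </pre>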
+ *
  * <h3>Token Position Increments</h3>
- * <p>
- *    By default, TokenStream arranges for the 
- *    {@link org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute#getPositionIncrement() position increment} of all tokens to be one.
- *    This means that the position stored for that token in the index would be one more than
- *    that of the previous token.
- *    Recall that phrase and proximity searches rely on position info.
- * </p>
- * <p>
- *    If the selected analyzer filters the stop words "is" and "the", then for a document 
- *    containing the string "blue is the sky", only the tokens "blue", "sky" are indexed, 
- *    with position("sky") = 3 + position("blue"). Now, a phrase query "blue is the sky"
- *    would find that document, because the same analyzer filters the same stop words from
- *    that query. But the phrase query "blue sky" would not find that document because the
- *    position increment between "blue" and "sky" is only 1.
- * </p>
- * <p>   
- *    If this behavior does not fit the application needs, the query parser needs to be
- *    configured to not take position increments into account when generating phrase queries.
- * </p>
- * <p>
- *   Note that a filter that filters <strong>out</strong> tokens <strong>must</strong> increment the position increment in order not to generate corrupt
- *   tokenstream graphs. Here is the logic used by StopFilter to increment positions when filtering out tokens:
- * </p>
- * <PRE class="prettyprint">
+ *
+ * <p>By default, TokenStream arranges for the {@link
+ * org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute#getPositionIncrement()
+ * position increment} of all tokens to be one. This means that the position stored for that token
+ * in the index would be one more than that of the previous token. Recall that phrase and proximity
+ * searches rely on position info.
+ *
+ * <p>If the selected analyzer filters the stop words "is" and "the", then for a document containing
+ * the string "blue is the sky", only the tokens "blue", "sky" are indexed, with position("sky") = 3
+ * + position("blue"). Now, a phrase query "blue is the sky" would find that document, because the
+ * same analyzer filters the same stop words from that query. But the phrase query "blue sky" would
+ * not find that document because the position increment between "blue" and "sky" is only 1.
+ *
+ * <p>If this behavior does not fit the application needs, the query parser needs to be configured
+ * to not take position increments into account when generating phrase queries.
+ *
+ * <p>Note that a filter that filters <strong>out</strong> tokens <strong>must</strong> increment
+ * the position increment in order not to generate corrupt token stream graphs. Here, in sketch
+ * form, is the logic used by StopFilter to increment positions when filtering out tokens:
+ *
+ * <pre class="prettyprint">
  *   public TokenStream tokenStream(final String fieldName, Reader reader) {
  *     final TokenStream ts = someAnalyzer.tokenStream(fieldName, reader);
  *     TokenStream res = new TokenStream() {
  *       CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
  *       PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- * 
+ *
  *       public boolean incrementToken() throws IOException {
  *         int extraIncrement = 0;
  *         while (true) {
@@ -345,7 +319,7 @@
  *             if (stopWords.contains(termAtt.toString())) {
  *               extraIncrement += posIncrAtt.getPositionIncrement(); // filter this word
  *               continue;
- *             } 
+ *             }
  *             if (extraIncrement &gt; 0) {
  *               posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement()+extraIncrement);
  *             }
@@ -356,134 +330,132 @@
  *     };
  *     return res;
  *   }
- * </PRE>
- * <p>
- *    A few more use cases for modifying position increments are:
- * </p>
+ * </pre>
+ *
+ * <p>A few more use cases for modifying position increments are:
+ *
  * <ol>
- *   <li>Inhibiting phrase and proximity matches in sentence boundaries &ndash; for this, a tokenizer that 
- *       identifies a new sentence can add 1 to the position increment of the first token of the new sentence.</li>
- *   <li>Injecting synonyms &ndash; synonyms of a token should be created at the same position as the
- *       original token, and the output order of the original token and the injected synonym is undefined
- *       as long as they both leave from the same position.  As result, all synonyms of a token would be
- *       considered to appear in exactly the same position as that token, and so would they be seen by
- *       phrase and proximity searches.  For multi-token synonyms to work correctly, you should use
- *       {@code SynoymGraphFilter} at search time only.</li>
+ *   <li>Inhibiting phrase and proximity matches in sentence boundaries &ndash; for this, a
+ *       tokenizer that identifies a new sentence can add 1 to the position increment of the first
+ *       token of the new sentence.
+ *   <li>Injecting synonyms &ndash; synonyms of a token should be created at the same position as
+ *       the original token, and the output order of the original token and the injected synonym is
+ *       undefined as long as they both leave from the same position. As a result, all synonyms of a
+ *       token would be considered to appear in exactly the same position as that token, and that is
+ *       how phrase and proximity searches will see them. For multi-token synonyms to work
+ *       correctly, you should use {@code SynonymGraphFilter} at search time only (see the sketch
+ *       after this list).
  * </ol>
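+ *
+ * <p>A minimal sketch of search-time synonym injection (it assumes the analysis-common module; the
+ * synonym pair is purely illustrative):
+ *
+ * <pre class="prettyprint">
+ *   SynonymMap.Builder builder = new SynonymMap.Builder(true);
+ *   builder.add(new CharsRef("tv"), new CharsRef("television"), true);
+ *   final SynonymMap map = builder.build(); // throws IOException
+ *
+ *   Analyzer searchAnalyzer = new Analyzer() {
+ *     {@literal @Override}
+ *     protected TokenStreamComponents createComponents(String fieldName) {
+ *       Tokenizer source = new WhitespaceTokenizer();
+ *       TokenStream result = new SynonymGraphFilter(source, map, true);
+ *       return new TokenStreamComponents(source, result);
+ *     }
+ *   };
+ * </pre>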
- * 
+ *
  * <h3>Token Position Length</h3>
- * <p>
- *    By default, all tokens created by Analyzers and Tokenizers have a
- *    {@link org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute#getPositionLength() position length} of one.
- *    This means that the token occupies a single position. This attribute is not indexed
- *    and thus not taken into account for positional queries, but is used by eg. suggesters.
- * </p>
- * <p>
- *    The main use case for positions lengths is multi-word synonyms. With single-word
- *    synonyms, setting the position increment to 0 is enough to denote the fact that two
- *    words are synonyms, for example:
- * </p>
+ *
+ * <p>By default, all tokens created by Analyzers and Tokenizers have a {@link
+ * org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute#getPositionLength() position
+ * length} of one. This means that the token occupies a single position. This attribute is not
+ * indexed and thus not taken into account for positional queries, but is used by, e.g., suggesters.
+ *
+ * <p>The main use case for position lengths is multi-word synonyms. With single-word synonyms,
+ * setting the position increment to 0 is enough to denote the fact that two words are synonyms, for
+ * example:
+ *
  * <table>
  * <caption>table showing position increments of 1 and 0 for red and magenta, respectively</caption>
  * <tr><td>Term</td><td>red</td><td>magenta</td></tr>
  * <tr><td>Position increment</td><td>1</td><td>0</td></tr>
  * </table>
- * <p>
- *    Given that position(magenta) = 0 + position(red), they are at the same position, so anything
- *    working with analyzers will return the exact same result if you replace "magenta" with "red"
- *    in the input. However, multi-word synonyms are more tricky. Let's say that you want to build
- *    a TokenStream where "IBM" is a synonym of "Internal Business Machines". Position increments
- *    are not enough anymore:
- * </p>
+ *
+ * <p>Given that position(magenta) = 0 + position(red), they are at the same position, so anything
+ * working with analyzers will return the exact same result if you replace "magenta" with "red" in
+ * the input. However, multi-word synonyms are trickier. Let's say that you want to build a
+ * TokenStream where "IBM" is a synonym of "International Business Machines". Position increments
+ * are not enough anymore:
+ *
  * <table>
  * <caption>position increments where international is zero</caption>
  * <tr><td>Term</td><td>IBM</td><td>International</td><td>Business</td><td>Machines</td></tr>
  * <tr><td>Position increment</td><td>1</td><td>0</td><td>1</td><td>1</td></tr>
  * </table>
- * <p>
- *    The problem with this token stream is that "IBM" is at the same position as "International"
- *    although it is a synonym with "International Business Machines" as a whole. Setting
- *    the position increment of "Business" and "Machines" to 0 wouldn't help as it would mean
- *    than "International" is a synonym of "Business". The only way to solve this issue is to
- *    make "IBM" span across 3 positions, this is where position lengths come to rescue.
- * </p>
+ *
+ * <p>The problem with this token stream is that "IBM" is at the same position as "International"
+ * although it is a synonym with "International Business Machines" as a whole. Setting the position
+ * increment of "Business" and "Machines" to 0 wouldn't help as it would mean than "International"
+ * is a synonym of "Business". The only way to solve this issue is to make "IBM" span across 3
+ * positions, this is where position lengths come to rescue.
+ *
  * <table>
  * <caption>position lengths where IBM is three</caption>
  * <tr><td>Term</td><td>IBM</td><td>International</td><td>Business</td><td>Machines</td></tr>
  * <tr><td>Position increment</td><td>1</td><td>0</td><td>1</td><td>1</td></tr>
  * <tr><td>Position length</td><td>3</td><td>1</td><td>1</td><td>1</td></tr>
  * </table>
- * <p>
- *    This new attribute makes clear that "IBM" and "International Business Machines" start and end
- *    at the same positions.
- * </p>
- * <a id="corrupt"></a>
+ *
+ * <p>This new attribute makes clear that "IBM" and "International Business Machines" start and end
+ * at the same positions.
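+ *
+ * <p>In code, a (hypothetical) synonym filter emitting the "IBM" token from the table above would
+ * set the attributes roughly as follows &ndash; a fragment, not a complete filter:
+ *
+ * <pre class="prettyprint">
+ *   termAtt.setEmpty().append("IBM");
+ *   posIncrAtt.setPositionIncrement(1); // a new position...
+ *   posLenAtt.setPositionLength(3);     // ...spanning "International Business Machines"
+ * </pre>
+ *
+ * <a id="corrupt"></a>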
+ *
  * <h3>How to not write corrupt token streams</h3>
- * <p>
- *    There are a few rules to observe when writing custom Tokenizers and TokenFilters:
- * </p>
+ *
+ * <p>There are a few rules to observe when writing custom Tokenizers and TokenFilters:
+ *
  * <ul>
- *   <li>The first position increment must be &gt; 0.</li>
- *   <li>Positions must not go backward.</li>
- *   <li>Tokens that have the same start position must have the same start offset.</li>
- *   <li>Tokens that have the same end position (taking into account the
- *   position length) must have the same end offset.</li>
- *   <li>Tokenizers must call {@link
- *   org.apache.lucene.util.AttributeSource#clearAttributes()} in
- *   incrementToken().</li>
- *   <li>Tokenizers must override {@link
- *   org.apache.lucene.analysis.TokenStream#end()}, and pass the final
- *   offset (the total number of input characters processed) to both
- *   parameters of {@link org.apache.lucene.analysis.tokenattributes.OffsetAttribute#setOffset(int, int)}.</li>
+ *   <li>The first position increment must be &gt; 0.
+ *   <li>Positions must not go backward.
+ *   <li>Tokens that have the same start position must have the same start offset.
+ *   <li>Tokens that have the same end position (taking into account the position length) must have
+ *       the same end offset.
+ *   <li>Tokenizers must call {@link org.apache.lucene.util.AttributeSource#clearAttributes()} in
+ *       incrementToken().
+ *   <li>Tokenizers must override {@link org.apache.lucene.analysis.TokenStream#end()}, and pass the
+ *       final offset (the total number of input characters processed) to both parameters of {@link
+ *       org.apache.lucene.analysis.tokenattributes.OffsetAttribute#setOffset(int, int)}.
  * </ul>
- * <p>
- *    Although these rules might seem easy to follow, problems can quickly happen when chaining
- *    badly implemented filters that play with positions and offsets, such as synonym or n-grams
- *    filters. Here are good practices for writing correct filters:
- * </p>
+ *
+ * <p>Although these rules might seem easy to follow, problems can quickly happen when chaining
+ * badly implemented filters that play with positions and offsets, such as synonym or n-gram
+ * filters. Here are good practices for writing correct filters:
+ *
  * <ul>
- *   <li>Token filters should not modify offsets. If you feel that your filter would need to modify offsets, then it should probably be implemented as a tokenizer.</li>
- *   <li>Token filters should not insert positions. If a filter needs to add tokens, then they should all have a position increment of 0.</li>
- *   <li>When they add tokens, token filters should call {@link org.apache.lucene.util.AttributeSource#clearAttributes()} first.</li>
- *   <li>When they remove tokens, token filters should increment the position increment of the following token.</li>
- *   <li>Token filters should preserve position lengths.</li>
+ *   <li>Token filters should not modify offsets. If you feel that your filter would need to modify
+ *       offsets, then it should probably be implemented as a tokenizer.
+ *   <li>Token filters should not insert positions. If a filter needs to add tokens, then they
+ *       should all have a position increment of 0.
+ *   <li>When they add tokens, token filters should call {@link
+ *       org.apache.lucene.util.AttributeSource#clearAttributes()} first, or restore a complete
+ *       captured state (see the sketch after this list).
+ *   <li>When they remove tokens, token filters should increment the position increment of the
+ *       following token.
+ *   <li>Token filters should preserve position lengths.
  * </ul>
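+ *
+ * <p>A sketch tying several of these rules together: a (hypothetical) filter that re-emits each
+ * token a second time at the same position, buffering with captureState()/restoreState():
+ *
+ * <pre class="prettyprint">
+ * public final class RepeatFilter extends TokenFilter {
+ *
+ *   private final PositionIncrementAttribute posIncrAtt =
+ *       addAttribute(PositionIncrementAttribute.class);
+ *   private State saved;
+ *
+ *   public RepeatFilter(TokenStream input) {
+ *     super(input);
+ *   }
+ *
+ *   {@literal @Override}
+ *   public boolean incrementToken() throws IOException {
+ *     if (saved != null) {
+ *       restoreState(saved);                // re-emit the buffered token...
+ *       posIncrAtt.setPositionIncrement(0); // ...at the same position
+ *       saved = null;
+ *       return true;
+ *     }
+ *     if (!input.incrementToken()) {
+ *       return false;
+ *     }
+ *     saved = captureState(); // buffer a copy for the next call
+ *     return true;
+ *   }
+ *
+ *   {@literal @Override}
+ *   public void reset() throws IOException {
+ *     super.reset();
+ *     saved = null; // clear per-stream state; the filter may be reused
+ *   }
+ * }
+ * </pre>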
+ *
  * <h2>TokenStream API</h2>
- * <p>
- *   "Flexible Indexing" summarizes the effort of making the Lucene indexer
- *   pluggable and extensible for custom index formats.  A fully customizable
- *   indexer means that users will be able to store custom data structures on
- *   disk. Therefore the analysis API must transport custom types of
- *   data from the documents to the indexer. (It also supports communications
- *   amongst the analysis components.)
- * </p>
+ *
+ * <p>"Flexible Indexing" summarizes the effort of making the Lucene indexer pluggable and
+ * extensible for custom index formats. A fully customizable indexer means that users will be able
+ * to store custom data structures on disk. Therefore the analysis API must transport custom types
+ * of data from the documents to the indexer. (It also supports communication among the analysis
+ * components.)
+ *
  * <h3>Attribute and AttributeSource</h3>
- * <p>
- *   Classes {@link org.apache.lucene.util.Attribute} and 
- *   {@link org.apache.lucene.util.AttributeSource} serve as the basis upon which 
- *   the analysis elements of "Flexible Indexing" are implemented. An Attribute 
- *   holds a particular piece of information about a text token. For example, 
- *   {@link org.apache.lucene.analysis.tokenattributes.CharTermAttribute} 
- *   contains the term text of a token, and 
- *   {@link org.apache.lucene.analysis.tokenattributes.OffsetAttribute} contains
- *   the start and end character offsets of a token. An AttributeSource is a 
- *   collection of Attributes with a restriction: there may be only one instance
- *   of each attribute type. TokenStream now extends AttributeSource, which means
- *   that one can add Attributes to a TokenStream. Since TokenFilter extends
- *   TokenStream, all filters are also AttributeSources.
- * </p>
- * <p>
- * Lucene provides seven Attributes out of the box:
- * </p>
+ *
+ * <p>Classes {@link org.apache.lucene.util.Attribute} and {@link
+ * org.apache.lucene.util.AttributeSource} serve as the basis upon which the analysis elements of
+ * "Flexible Indexing" are implemented. An Attribute holds a particular piece of information about a
+ * text token. For example, {@link org.apache.lucene.analysis.tokenattributes.CharTermAttribute}
+ * contains the term text of a token, and {@link
+ * org.apache.lucene.analysis.tokenattributes.OffsetAttribute} contains the start and end character
+ * offsets of a token. An AttributeSource is a collection of Attributes with a restriction: there
+ * may be only one instance of each attribute type. TokenStream now extends AttributeSource, which
+ * means that one can add Attributes to a TokenStream. Since TokenFilter extends TokenStream, all
+ * filters are also AttributeSources.
+ *
+ * <p>Lucene provides seven Attributes out of the box:
+ *
  * <table class="padding3">
  *   <caption>common bundled attributes</caption>
  *   <tbody style="border: 1px solid">
  *   <tr>
  *     <td>{@link org.apache.lucene.analysis.tokenattributes.CharTermAttribute}</td>
  *     <td>
- *       The term text of a token.  Implements {@link java.lang.CharSequence} 
+ *       The term text of a token.  Implements {@link java.lang.CharSequence}
  *       (providing methods length() and charAt(), and allowing e.g. for direct
- *       use with regular expression {@link java.util.regex.Matcher}s) and 
+ *       use with regular expression {@link java.util.regex.Matcher}s) and
  *       {@link java.lang.Appendable} (allowing the term text to be appended to.)
  *     </td>
  *   </tr>
@@ -515,54 +487,57 @@
  *     <td>{@link org.apache.lucene.analysis.tokenattributes.KeywordAttribute}</td>
  *     <td>
  *       Keyword-aware TokenStreams/-Filters skip modification of tokens that
- *       return true from this attribute's isKeyword() method. 
+ *       return true from this attribute's isKeyword() method.
  *     </td>
  *   </tr>
  *   </tbody>
  * </table>
+ *
  * <h3>More Requirements for Analysis Component Classes</h3>
- * Due to the historical development of the API, there are some perhaps
- * less than obvious requirements to implement analysis components
- * classes.
+ *
+ * Due to the historical development of the API, there are some perhaps less than obvious
+ * requirements for implementing analysis component classes.
+ *
  * <h4 id="analysis-lifetime">Token Stream Lifetime</h4>
- * The code fragment of the <a href="#analysis-workflow">analysis workflow
- * protocol</a> above shows a token stream being obtained, used, and then
- * left for garbage. However, that does not mean that the components of
- * that token stream will, in fact, be discarded. The default is just the
- * opposite. {@link org.apache.lucene.analysis.Analyzer} applies a reuse
- * strategy to the tokenizer and the token filters. It will reuse
- * them. For each new input, it calls {@link org.apache.lucene.analysis.Tokenizer#setReader(java.io.Reader)} 
- * to set the input. Your components must be prepared for this scenario,
- * as described below.
+ *
+ * The code fragment of the <a href="#analysis-workflow">analysis workflow protocol</a> above shows
+ * a token stream being obtained, used, and then left for garbage. However, that does not mean that
+ * the components of that token stream will, in fact, be discarded. The default is just the
+ * opposite. {@link org.apache.lucene.analysis.Analyzer} applies a reuse strategy to the tokenizer
+ * and the token filters. It will reuse them. For each new input, it calls {@link
+ * org.apache.lucene.analysis.Tokenizer#setReader(java.io.Reader)} to set the input. Your components
+ * must be prepared for this scenario, as described below.
+ *
  * <h4>Tokenizer</h4>
+ *
  * <ul>
- *   <li>
- *   You should create your tokenizer class by extending {@link org.apache.lucene.analysis.Tokenizer}.
- *   </li>
- *   <li>
- *   Your tokenizer <strong>must</strong> override {@link org.apache.lucene.analysis.TokenStream#end()}.
- *   Your implementation <strong>must</strong> call
- *   <code>super.end()</code>. It must set a correct final offset into
- *   the offset attribute, and finish up and other attributes to reflect
- *   the end of the stream.
- *   </li>
- *   <li>
- *   If your tokenizer overrides {@link org.apache.lucene.analysis.TokenStream#reset()}
- *   or {@link org.apache.lucene.analysis.TokenStream#close()}, it
- *   <strong>must</strong> call the corresponding superclass method.
- *   </li>
+ *   <li>You should create your tokenizer class by extending {@link
+ *       org.apache.lucene.analysis.Tokenizer}.
+ *   <li>Your tokenizer <strong>must</strong> override {@link
+ *       org.apache.lucene.analysis.TokenStream#end()}. Your implementation <strong>must</strong>
+ *       call <code>super.end()</code>. It must set a correct final offset into the offset
+ *       attribute, and finish up any other attributes to reflect the end of the stream (see the
+ *       skeleton after this list).
+ *   <li>If your tokenizer overrides {@link org.apache.lucene.analysis.TokenStream#reset()} or
+ *       {@link org.apache.lucene.analysis.TokenStream#close()}, it <strong>must</strong> call the
+ *       corresponding superclass method.
  * </ul>
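+ *
+ * <p>A skeleton honoring these requirements (a sketch &ndash; the actual scanning and offset
+ * bookkeeping are elided):
+ *
+ * <pre class="prettyprint">
+ * public final class MyTokenizer extends Tokenizer {
+ *
+ *   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ *   private int finalOffset;
+ *
+ *   {@literal @Override}
+ *   public boolean incrementToken() throws IOException {
+ *     clearAttributes(); // required before populating attributes
+ *     ...                // read from this.input, set term/offset attributes
+ *   }
+ *
+ *   {@literal @Override}
+ *   public void end() throws IOException {
+ *     super.end();
+ *     offsetAtt.setOffset(finalOffset, finalOffset); // final offset into both parameters
+ *   }
+ *
+ *   {@literal @Override}
+ *   public void reset() throws IOException {
+ *     super.reset();
+ *     finalOffset = 0; // reset any per-stream state
+ *   }
+ * }
+ * </pre>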
+ *
  * <h4>Token Filter</h4>
- *   You should create your token filter class by extending {@link org.apache.lucene.analysis.TokenFilter}.
- *   If your token filter overrides {@link org.apache.lucene.analysis.TokenStream#reset()},
- *   {@link org.apache.lucene.analysis.TokenStream#end()}
- *   or {@link org.apache.lucene.analysis.TokenStream#close()}, it
- *   <strong>must</strong> call the corresponding superclass method.
+ *
+ * You should create your token filter class by extending {@link
+ * org.apache.lucene.analysis.TokenFilter}. If your token filter overrides {@link
+ * org.apache.lucene.analysis.TokenStream#reset()}, {@link
+ * org.apache.lucene.analysis.TokenStream#end()} or {@link
+ * org.apache.lucene.analysis.TokenStream#close()}, it <strong>must</strong> call the corresponding
+ * superclass method.
+ *
  * <h4>Creating delegates</h4>
- *   Forwarding classes (those which extend {@link org.apache.lucene.analysis.Tokenizer} but delegate
- *   selected logic to another tokenizer) must also set the reader to the delegate in the overridden
- *   {@link org.apache.lucene.analysis.Tokenizer#reset()} method, e.g.:
- *   <pre class="prettyprint">
+ *
+ * Forwarding classes (those which extend {@link org.apache.lucene.analysis.Tokenizer} but delegate
+ * selected logic to another tokenizer) must also set the reader to the delegate in the overridden
+ * {@link org.apache.lucene.analysis.Tokenizer#reset()} method, e.g.:
+ *
+ * <pre class="prettyprint">
  *     public class ForwardingTokenizer extends Tokenizer {
  *        private Tokenizer delegate;
  *        ...
@@ -573,90 +548,94 @@
  *           delegate.reset();
  *        }
  *     }
- *   </pre>
+ * </pre>
+ *
  * <h3>Testing Your Analysis Component</h3>
- * <p>
- *     The lucene-test-framework component defines
- *     <a href="{@docRoot}/../test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.html">BaseTokenStreamTestCase</a>. By extending
- *     this class, you can create JUnit tests that validate that your
- *     Analyzer and/or analysis components correctly implement the
- *     protocol. The checkRandomData methods of that class are particularly effective in flushing out errors.
- * </p>
+ *
+ * <p>The lucene-test-framework component defines <a
+ * href="{@docRoot}/../test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.html">BaseTokenStreamTestCase</a>.
+ * By extending this class, you can create JUnit tests that validate that your Analyzer and/or
+ * analysis components correctly implement the protocol. The checkRandomData methods of that class
+ * are particularly effective in flushing out errors.
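+ *
+ * <p>For instance, a sketch of a test for the <code>MyAnalyzer</code> developed in the example
+ * below (it assumes the test-framework on the test classpath):
+ *
+ * <pre class="prettyprint">
+ * public class MyAnalyzerTest extends BaseTokenStreamTestCase {
+ *   public void testBasics() throws Exception {
+ *     Analyzer analyzer = new MyAnalyzer();
+ *     // with the LengthFilter(3, ...) variant, words under three characters are dropped:
+ *     assertAnalyzesTo(analyzer, "This is a demo", new String[] {"This", "demo"});
+ *     checkRandomData(random(), analyzer, 100); // hammer it with random text
+ *   }
+ * }
+ * </pre>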
+ *
  * <h3>Using the TokenStream API</h3>
- * There are a few important things to know in order to use the new API efficiently which are summarized here. You may want
- * to walk through the example below first and come back to this section afterwards.
- * <ol><li>
- * Please keep in mind that an AttributeSource can only have one instance of a particular Attribute. Furthermore, if 
- * a chain of a TokenStream and multiple TokenFilters is used, then all TokenFilters in that chain share the Attributes
- * with the TokenStream.
- * </li>
- * <li>
- * Attribute instances are reused for all tokens of a document. Thus, a TokenStream/-Filter needs to update
- * the appropriate Attribute(s) in incrementToken(). The consumer, commonly the Lucene indexer, consumes the data in the
- * Attributes and then calls incrementToken() again until it returns false, which indicates that the end of the stream
- * was reached. This means that in each call of incrementToken() a TokenStream/-Filter can safely overwrite the data in
- * the Attribute instances.
- * </li>
- * <li>
- * For performance reasons a TokenStream/-Filter should add/get Attributes during instantiation; i.e., create an attribute in the
- * constructor and store references to it in an instance variable.  Using an instance variable instead of calling addAttribute()/getAttribute() 
- * in incrementToken() will avoid attribute lookups for every token in the document.
- * </li>
- * <li>
- * All methods in AttributeSource are idempotent, which means calling them multiple times always yields the same
- * result. This is especially important to know for addAttribute(). The method takes the <b>type</b> (<code>Class</code>)
- * of an Attribute as an argument and returns an <b>instance</b>. If an Attribute of the same type was previously added, then
- * the already existing instance is returned, otherwise a new instance is created and returned. Therefore TokenStreams/-Filters
- * can safely call addAttribute() with the same Attribute type multiple times. Even consumers of TokenStreams should
- * normally call addAttribute() instead of getAttribute(), because it would not fail if the TokenStream does not have this
- * Attribute (getAttribute() would throw an IllegalArgumentException, if the Attribute is missing). More advanced code
- * could simply check with hasAttribute(), if a TokenStream has it, and may conditionally leave out processing for
- * extra performance.
- * </li></ol>
+ *
+ * There are a few important things to know in order to use this API efficiently; they are
+ * summarized here. You may want to walk through the example below first and come back to this
+ * section afterwards.
+ *
+ * <ol>
+ *   <li>Please keep in mind that an AttributeSource can only have one instance of a particular
+ *       Attribute. Furthermore, if a chain of a TokenStream and multiple TokenFilters is used, then
+ *       all TokenFilters in that chain share the Attributes with the TokenStream.
+ *   <li>Attribute instances are reused for all tokens of a document. Thus, a TokenStream/-Filter
+ *       needs to update the appropriate Attribute(s) in incrementToken(). The consumer, commonly
+ *       the Lucene indexer, consumes the data in the Attributes and then calls incrementToken()
+ *       again until it returns false, which indicates that the end of the stream was reached. This
+ *       means that in each call of incrementToken() a TokenStream/-Filter can safely overwrite the
+ *       data in the Attribute instances.
+ *   <li>For performance reasons a TokenStream/-Filter should add/get Attributes during
+ *       instantiation; i.e., create an attribute in the constructor and store references to it in
+ *       an instance variable. Using an instance variable instead of calling
+ *       addAttribute()/getAttribute() in incrementToken() will avoid attribute lookups for every
+ *       token in the document.
+ *   <li>All methods in AttributeSource are idempotent, which means calling them multiple times
+ *       always yields the same result. This is especially important to know for addAttribute(). The
+ *       method takes the <b>type</b> (<code>Class</code>) of an Attribute as an argument and
+ *       returns an <b>instance</b>. If an Attribute of the same type was previously added, then the
+ *       already existing instance is returned, otherwise a new instance is created and returned.
+ *       Therefore TokenStreams/-Filters can safely call addAttribute() with the same Attribute type
+ *       multiple times. Even consumers of TokenStreams should normally call addAttribute() instead
+ *       of getAttribute(), because it would not fail if the TokenStream does not have this
+ *       Attribute (getAttribute() would throw an IllegalArgumentException, if the Attribute is
+ *       missing). More advanced code could simply check with hasAttribute() whether a TokenStream
+ *       has it, and may conditionally leave out processing for extra performance.
+ * </ol>
+ *
  * <h3>Example</h3>
- * <p>
- *   In this example we will create a WhiteSpaceTokenizer and use a LengthFilter to suppress all words that have
- *   only two or fewer characters. The LengthFilter is part of the Lucene core and its implementation will be explained
- *   here to illustrate the usage of the TokenStream API.
- * </p>
- * <p>
- *   Then we will develop a custom Attribute, a PartOfSpeechAttribute, and add another filter to the chain which
- *   utilizes the new custom attribute, and call it PartOfSpeechTaggingFilter.
- * </p>
+ *
+ * <p>In this example we will create a WhitespaceTokenizer and use a LengthFilter to suppress all
+ * words that have only two or fewer characters. The LengthFilter is part of the analysis-common
+ * module and its implementation will be explained here to illustrate the usage of the TokenStream API.
+ *
+ * <p>Then we will develop a custom Attribute, a PartOfSpeechAttribute, and add another filter to
+ * the chain which utilizes the new custom attribute, and call it PartOfSpeechTaggingFilter.
+ *
  * <h4>Whitespace tokenization</h4>
+ *
  * <pre class="prettyprint">
  * public class MyAnalyzer extends Analyzer {
- * 
+ *
  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
 *     return new TokenStreamComponents(new WhitespaceTokenizer());
  *   }
- *   
+ *
  *   public static void main(String[] args) throws IOException {
  *     // text to tokenize
  *     final String text = "This is a demo of the TokenStream API";
- *     
+ *
 *     MyAnalyzer analyzer = new MyAnalyzer();
  *     TokenStream stream = analyzer.tokenStream("field", new StringReader(text));
- *     
+ *
  *     // get the CharTermAttribute from the TokenStream
  *     CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
- * 
+ *
  *     try {
  *       stream.reset();
- *     
+ *
  *       // print all tokens until stream is exhausted
  *       while (stream.incrementToken()) {
  *         System.out.println(termAtt.toString());
  *       }
- *     
+ *
  *       stream.end();
  *     } finally {
  *       stream.close();
@@ -664,9 +643,11 @@
  *   }
  * }
  * </pre>
- * In this easy example a simple white space tokenization is performed. In main() a loop consumes the stream and
- * prints the term text of the tokens by accessing the CharTermAttribute that the WhitespaceTokenizer provides. 
- * Here is the output:
+ *
+ * In this simple example, whitespace tokenization is performed. In main() a loop consumes
+ * the stream and prints the term text of the tokens by accessing the CharTermAttribute that the
+ * WhitespaceTokenizer provides. Here is the output:
+ *
  * <pre>
  * This
  * is
@@ -678,10 +659,13 @@
  * TokenStream
  * API
  * </pre>
+ *
  * <h4>Adding a LengthFilter</h4>
- * We want to suppress all tokens that have 2 or less characters. We can do that
- * easily by adding a LengthFilter to the chain. Only the
- * <code>createComponents()</code> method in our analyzer needs to be changed:
+ *
+ * We want to suppress all tokens that have two or fewer characters. We can do that easily by
+ * adding a LengthFilter to the chain. Only the <code>createComponents()</code> method in our
+ * analyzer needs to be changed:
+ *
  * <pre class="prettyprint">
  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
@@ -690,7 +674,9 @@
  *     return new TokenStreamComponents(source, result);
  *   }
  * </pre>
+ *
 * Note that now only words with three or more characters are contained in the output:
+ *
  * <pre>
  * This
  * demo
@@ -699,15 +685,17 @@
  * TokenStream
  * API
  * </pre>
+ *
 * Now let's take a look at how the LengthFilter is implemented:
+ *
  * <pre class="prettyprint">
  * public final class LengthFilter extends FilteringTokenFilter {
- * 
+ *
  *   private final int min;
  *   private final int max;
- *   
+ *
  *   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
- * 
+ *
  *   &#47;**
  *    * Create a new LengthFilter. This will filter out tokens whose
  *    * CharTermAttribute is either too short
@@ -722,41 +710,35 @@
  *     this.min = min;
  *     this.max = max;
  *   }
- * 
+ *
  *   {@literal @Override}
  *   public boolean accept() {
  *     final int len = termAtt.length();
  *     return (len &gt;= min &amp;&amp; len &lt;= max);
  *   }
- * 
+ *
  * }
  * </pre>
- * <p>
- *   In LengthFilter, the CharTermAttribute is added and stored in the instance
- *   variable <code>termAtt</code>.  Remember that there can only be a single
- *   instance of CharTermAttribute in the chain, so in our example the
- *   <code>addAttribute()</code> call in LengthFilter returns the
- *   CharTermAttribute that the WhitespaceTokenizer already added.
- * </p>
- * <p>
- *   The tokens are retrieved from the input stream in FilteringTokenFilter's 
- *   <code>incrementToken()</code> method (see below), which calls LengthFilter's
- *   <code>accept()</code> method. By looking at the term text in the
- *   CharTermAttribute, the length of the term can be determined and tokens that
- *   are either too short or too long are skipped.  Note how
- *   <code>accept()</code> can efficiently access the instance variable; no 
- *   attribute lookup is necessary. The same is true for the consumer, which can
- *   simply use local references to the Attributes.
- * </p>
- * <p>
- *   LengthFilter extends FilteringTokenFilter:
- * </p>
- * 
+ *
+ * <p>In LengthFilter, the CharTermAttribute is added and stored in the instance variable <code>
+ * termAtt</code>. Remember that there can only be a single instance of CharTermAttribute in the
+ * chain, so in our example the <code>addAttribute()</code> call in LengthFilter returns the
+ * CharTermAttribute that the WhitespaceTokenizer already added.
+ *
+ * <p>The tokens are retrieved from the input stream in FilteringTokenFilter's <code>
+ * incrementToken()</code> method (see below), which calls LengthFilter's <code>accept()</code>
+ * method. By looking at the term text in the CharTermAttribute, the length of the term can be
+ * determined and tokens that are either too short or too long are skipped. Note how <code>accept()
+ * </code> can efficiently access the instance variable; no attribute lookup is necessary. The same
+ * is true for the consumer, which can simply use local references to the Attributes.
+ *
+ * <p>LengthFilter extends FilteringTokenFilter:
+ *
  * <pre class="prettyprint">
  * public abstract class FilteringTokenFilter extends TokenFilter {
- * 
+ *
  *   private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- * 
+ *
  *   &#47;**
  *    * Create a new FilteringTokenFilter.
  *    * {@literal @param} in      the TokenStream to consume
@@ -764,10 +746,10 @@
  *   public FilteringTokenFilter(Version version, TokenStream in) {
  *     super(in);
  *   }
- * 
+ *
 *   &#47;** Override this method and return whether the current input token should be returned by incrementToken. *&#47;
  *   protected abstract boolean accept() throws IOException;
- * 
+ *
  *   {@literal @Override}
  *   public final boolean incrementToken() throws IOException {
  *     int skippedPositions = 0;
@@ -783,92 +765,94 @@
  *     // reached EOS -- return false
  *     return false;
  *   }
- * 
+ *
  *   {@literal @Override}
  *   public void reset() throws IOException {
  *     super.reset();
  *   }
- * 
+ *
  * }
  * </pre>
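+ *
+ * <p>Because each Attribute exists only once per chain, repeated <code>addAttribute()</code>
+ * calls are cheap and always return the same instance. A minimal sketch illustrating this:
+ *
+ * <pre class="prettyprint">
+ * TokenStream stream = analyzer.tokenStream("field", new StringReader(text));
+ * CharTermAttribute a = stream.addAttribute(CharTermAttribute.class);
+ * CharTermAttribute b = stream.addAttribute(CharTermAttribute.class);
+ * assert a == b; // both references point to the single instance in the chain
+ * </pre>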
- * 
+ *
  * <h4>Adding a custom Attribute</h4>
- * Now we're going to implement our own custom Attribute for part-of-speech tagging and call it consequently 
- * <code>PartOfSpeechAttribute</code>. First we need to define the interface of the new Attribute:
+ *
+ * Now we're going to implement our own custom Attribute for part-of-speech tagging and,
+ * accordingly, call it <code>PartOfSpeechAttribute</code>. First we need to define the interface
+ * of the new Attribute:
+ *
  * <pre class="prettyprint">
  *   public interface PartOfSpeechAttribute extends Attribute {
  *     public static enum PartOfSpeech {
  *       Noun, Verb, Adjective, Adverb, Pronoun, Preposition, Conjunction, Article, Unknown
  *     }
- *   
+ *
  *     public void setPartOfSpeech(PartOfSpeech pos);
- *   
+ *
  *     public PartOfSpeech getPartOfSpeech();
  *   }
  * </pre>
- * <p>
- *   Now we also need to write the implementing class. The name of that class is important here: By default, Lucene
- *   checks if there is a class with the name of the Attribute with the suffix 'Impl'. In this example, we would
- *   consequently call the implementing class <code>PartOfSpeechAttributeImpl</code>.
- * </p>
- * <p>
- *   This should be the usual behavior. However, there is also an expert-API that allows changing these naming conventions:
- *   {@link org.apache.lucene.util.AttributeFactory}. The factory accepts an Attribute interface as argument
- *   and returns an actual instance. You can implement your own factory if you need to change the default behavior.
- * </p>
- * <p>
- *   Now here is the actual class that implements our new Attribute. Notice that the class has to extend
- *   {@link org.apache.lucene.util.AttributeImpl}:
- * </p>
+ *
+ * <p>Now we also need to write the implementing class. The name of that class is important here:
+ * by default, Lucene checks whether there is a class whose name is that of the Attribute interface
+ * with the suffix 'Impl'. In this example, we would consequently call the implementing class
+ * <code>PartOfSpeechAttributeImpl</code>.
+ *
+ * <p>This should be the usual behavior. However, there is also an expert API that allows changing
+ * these naming conventions: {@link org.apache.lucene.util.AttributeFactory}. The factory accepts an
+ * Attribute interface as argument and returns an actual instance. You can implement your own
+ * factory if you need to change the default behavior, as sketched below.
+ *
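+ * <p>For instance, a tokenizer could be constructed with a custom factory like this (a sketch,
+ * assuming the PartOfSpeechAttributeImpl class shown below):
+ *
+ * <pre class="prettyprint">
+ * AttributeFactory factory = AttributeFactory.getStaticImplementation(
+ *     AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, PartOfSpeechAttributeImpl.class);
+ * Tokenizer tokenizer = new WhitespaceTokenizer(factory);
+ * </pre>
+ *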
+ * <p>Now here is the actual class that implements our new Attribute. Notice that the class has to
+ * extend {@link org.apache.lucene.util.AttributeImpl}:
+ *
  * <pre class="prettyprint">
- * public final class PartOfSpeechAttributeImpl extends AttributeImpl 
+ * public final class PartOfSpeechAttributeImpl extends AttributeImpl
  *                                   implements PartOfSpeechAttribute {
- *   
+ *
  *   private PartOfSpeech pos = PartOfSpeech.Unknown;
- *   
+ *
  *   public void setPartOfSpeech(PartOfSpeech pos) {
  *     this.pos = pos;
  *   }
- *   
+ *
  *   public PartOfSpeech getPartOfSpeech() {
  *     return pos;
  *   }
- * 
+ *
  *   {@literal @Override}
  *   public void clear() {
  *     pos = PartOfSpeech.Unknown;
  *   }
- * 
+ *
  *   {@literal @Override}
  *   public void copyTo(AttributeImpl target) {
  *     ((PartOfSpeechAttribute) target).setPartOfSpeech(pos);
  *   }
  * }
  * </pre>
- * <p>
- *   This is a simple Attribute implementation has only a single variable that
- *   stores the part-of-speech of a token. It extends the
- *   <code>AttributeImpl</code> class and therefore implements its abstract methods
- *   <code>clear()</code> and <code>copyTo()</code>. Now we need a TokenFilter that
- *   can set this new PartOfSpeechAttribute for each token. In this example we
- *   show a very naive filter that tags every word with a leading upper-case letter
- *   as a 'Noun' and all other words as 'Unknown'.
- * </p>
+ *
+ * <p>This simple Attribute implementation has only a single variable that stores the
+ * part-of-speech of a token. It extends the <code>AttributeImpl</code> class and therefore
+ * implements its abstract methods <code>clear()</code> and <code>copyTo()</code>. Now we need a
+ * TokenFilter that can set this new PartOfSpeechAttribute for each token. In this example we show a
+ * very naive filter that tags every word with a leading upper-case letter as a 'Noun' and all other
+ * words as 'Unknown'.
+ *
  * <pre class="prettyprint">
  *   public static class PartOfSpeechTaggingFilter extends TokenFilter {
  *     PartOfSpeechAttribute posAtt = addAttribute(PartOfSpeechAttribute.class);
  *     CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
- *     
+ *
  *     protected PartOfSpeechTaggingFilter(TokenStream input) {
  *       super(input);
  *     }
- *     
+ *
  *     public boolean incrementToken() throws IOException {
  *       if (!input.incrementToken()) {return false;}
  *       posAtt.setPartOfSpeech(determinePOS(termAtt.buffer(), 0, termAtt.length()));
  *       return true;
  *     }
- *     
+ *
  *     // determine the part of speech for the given term
  *     protected PartOfSpeech determinePOS(char[] term, int offset, int length) {
  *       // naive implementation that tags every uppercased word as noun
@@ -879,13 +863,13 @@
  *     }
  *   }
  * </pre>
- * <p>
- *   Just like the LengthFilter, this new filter stores references to the
- *   attributes it needs in instance variables. Notice how you only need to pass
- *   in the interface of the new Attribute and instantiating the correct class
- *   is automatically taken care of.
- * </p>
- * <p>Now we need to add the filter to the chain in MyAnalyzer:</p>
+ *
+ * <p>Just like the LengthFilter, this new filter stores references to the attributes it needs in
+ * instance variables. Notice how you only need to pass in the interface of the new Attribute;
+ * instantiating the correct class is automatically taken care of.
+ *
+ * <p>Now we need to add the filter to the chain in MyAnalyzer:
+ *
  * <pre class="prettyprint">
  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
@@ -895,7 +879,9 @@
  *     return new TokenStreamComponents(source, result);
  *   }
  * </pre>
+ *
  * Now let's look at the output:
+ *
  * <pre>
  * This
  * demo
@@ -904,39 +890,43 @@
  * TokenStream
  * API
  * </pre>
- * Apparently it hasn't changed, which shows that adding a custom attribute to a TokenStream/Filter chain does not
- * affect any existing consumers, simply because they don't know the new Attribute. Now let's change the consumer
- * to make use of the new PartOfSpeechAttribute and print it out:
+ *
+ * Apparently it hasn't changed, which shows that adding a custom attribute to a TokenStream/Filter
+ * chain does not affect any existing consumers, simply because they don't know about the new
+ * Attribute. Now let's change the consumer to make use of the new PartOfSpeechAttribute and print
+ * it out:
+ *
  * <pre class="prettyprint">
  *   public static void main(String[] args) throws IOException {
  *     // text to tokenize
  *     final String text = "This is a demo of the TokenStream API";
- *     
+ *
  *     MyAnalyzer analyzer = new MyAnalyzer();
  *     TokenStream stream = analyzer.tokenStream("field", new StringReader(text));
- *     
+ *
  *     // get the CharTermAttribute from the TokenStream
  *     CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
- *     
+ *
  *     // get the PartOfSpeechAttribute from the TokenStream
  *     PartOfSpeechAttribute posAtt = stream.addAttribute(PartOfSpeechAttribute.class);
- * 
+ *
  *     try {
  *       stream.reset();
- * 
+ *
  *       // print all tokens until stream is exhausted
  *       while (stream.incrementToken()) {
  *         System.out.println(termAtt.toString() + ": " + posAtt.getPartOfSpeech());
  *       }
- *     
+ *
  *       stream.end();
  *     } finally {
  *       stream.close();
  *     }
  *   }
  * </pre>
- * The change that was made is to get the PartOfSpeechAttribute from the TokenStream and print out its contents in
- * the while loop that consumes the stream. Here is the new output:
+ *
+ * The only change made is to get the PartOfSpeechAttribute from the TokenStream and print out its
+ * contents in the while loop that consumes the stream. Here is the new output:
+ *
  * <pre>
  * This: Noun
  * demo: Unknown
@@ -945,52 +935,59 @@
  * TokenStream: Noun
  * API: Noun
  * </pre>
- * Each word is now followed by its assigned PartOfSpeech tag. Of course this is a naive 
- * part-of-speech tagging. The word 'This' should not even be tagged as noun; it is only spelled capitalized because it
- * is the first word of a sentence. Actually this is a good opportunity for an exercise. To practice the usage of the new
- * API the reader could now write an Attribute and TokenFilter that can specify for each word if it was the first token
- * of a sentence or not. Then the PartOfSpeechTaggingFilter can make use of this knowledge and only tag capitalized words
- * as nouns if not the first word of a sentence (we know, this is still not a correct behavior, but hey, it's a good exercise). 
- * As a small hint, this is how the new Attribute class could begin:
+ *
+ * Each word is now followed by its assigned PartOfSpeech tag. Of course this is a naive
+ * part-of-speech tagging. The word 'This' should not even be tagged as a noun; it is only
+ * capitalized because it is the first word of a sentence. Actually this is a good opportunity for
+ * an exercise. To practice the usage of the new API the reader could now write an Attribute and
+ * TokenFilter that can specify for each word whether it was the first token of a sentence. Then
+ * the PartOfSpeechTaggingFilter can make use of this knowledge and only tag capitalized words as
+ * nouns if they are not the first word of a sentence (we know, this is still not correct behavior,
+ * but hey, it's a good exercise). As a small hint, this is how the new Attribute class could begin:
+ *
  * <pre class="prettyprint">
  *   public class FirstTokenOfSentenceAttributeImpl extends AttributeImpl
  *                               implements FirstTokenOfSentenceAttribute {
- *     
+ *
  *     private boolean firstToken;
- *     
+ *
  *     public void setFirstToken(boolean firstToken) {
  *       this.firstToken = firstToken;
  *     }
- *     
+ *
  *     public boolean getFirstToken() {
  *       return firstToken;
  *     }
- * 
+ *
  *     {@literal @Override}
  *     public void clear() {
  *       firstToken = false;
  *     }
- * 
+ *
  *   ...
  * </pre>
+ *
  * <h4>Adding a CharFilter chain</h4>
- * Analyzers take Java {@link java.io.Reader}s as input. Of course you can wrap your Readers with {@link java.io.FilterReader}s
- * to manipulate content, but this would have the big disadvantage that character offsets might be inconsistent with your original
- * text.
- * <p>
- * {@link org.apache.lucene.analysis.CharFilter} is designed to allow you to pre-process input like a FilterReader would, but also
- * preserve the original offsets associated with those characters. This way mechanisms like highlighting still work correctly.
- * CharFilters can be chained.
- * <p>
- * Example:
+ *
+ * Analyzers take Java {@link java.io.Reader}s as input. Of course you can wrap your Readers with
+ * {@link java.io.FilterReader}s to manipulate content, but this would have the big disadvantage
+ * that character offsets might be inconsistent with your original text.
+ *
+ * <p>{@link org.apache.lucene.analysis.CharFilter} is designed to allow you to pre-process input
+ * like a FilterReader would, but also preserve the original offsets associated with those
+ * characters. This way mechanisms like highlighting still work correctly. CharFilters can be
+ * chained.
+ *
+ * <p>Example:
+ *
  * <pre class="prettyprint">
  * public class MyAnalyzer extends Analyzer {
- * 
+ *
  *   {@literal @Override}
  *   protected TokenStreamComponents createComponents(String fieldName) {
  *     return new TokenStreamComponents(new MyTokenizer());
  *   }
- *   
+ *
  *   {@literal @Override}
  *   protected Reader initReader(String fieldName, Reader reader) {
  *     // wrap the Reader in a CharFilter chain.
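+ *     // a sketch of how such a chain could continue, using MappingCharFilter with a
+ *     // hypothetical mapping (the remainder of this method is elided in this excerpt):
+ *     // NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
+ *     // builder.add("\u00e9", "e"); // fold an accented character, for example
+ *     // return new MappingCharFilter(builder.build(), reader);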
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
index c317668..06508e7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
@@ -20,9 +20,8 @@ import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.TermState;
 
 /**
- * Holds all state required for {@link PostingsReaderBase}
- * to produce a {@link org.apache.lucene.index.PostingsEnum} without re-seeking the
- * terms dict.
+ * Holds all state required for {@link PostingsReaderBase} to produce a {@link
+ * org.apache.lucene.index.PostingsEnum} without re-seeking the terms dict.
  *
  * @lucene.internal
  */
@@ -38,10 +37,8 @@ public class BlockTermState extends OrdTermState {
   // TODO: update BTR to nuke this
   public long blockFilePointer;
 
-  /** Sole constructor. (For invocation by subclass 
-   *  constructors, typically implicit.) */
-  protected BlockTermState() {
-  }
+  /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
+  protected BlockTermState() {}
 
   @Override
   public void copyFrom(TermState _other) {
@@ -56,6 +53,13 @@ public class BlockTermState extends OrdTermState {
 
   @Override
   public String toString() {
-    return "docFreq=" + docFreq + " totalTermFreq=" + totalTermFreq + " termBlockOrd=" + termBlockOrd + " blockFP=" + blockFilePointer;
+    return "docFreq="
+        + docFreq
+        + " totalTermFreq="
+        + totalTermFreq
+        + " termBlockOrd="
+        + termBlockOrd
+        + " blockFP="
+        + blockFilePointer;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
index 3a2bc3f..24abcb4 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
@@ -16,46 +16,45 @@
  */
 package org.apache.lucene.codecs;
 
-
 import java.util.Objects;
 import java.util.ServiceLoader; // javadocs
 import java.util.Set;
-
 import org.apache.lucene.index.IndexWriterConfig; // javadocs
 import org.apache.lucene.util.NamedSPILoader;
 
 /**
  * Encodes/decodes an inverted index segment.
- * <p>
- * Note, when extending this class, the name ({@link #getName}) is 
- * written into the index. In order for the segment to be read, the
- * name must resolve to your implementation via {@link #forName(String)}.
- * This method uses Java's 
- * {@link ServiceLoader Service Provider Interface} (SPI) to resolve codec names.
- * <p>
- * If you implement your own codec, make sure that it has a no-arg constructor
- * so SPI can load it.
+ *
+ * <p>Note, when extending this class, the name ({@link #getName}) is written into the index. In
+ * order for the segment to be read, the name must resolve to your implementation via {@link
+ * #forName(String)}. This method uses Java's {@link ServiceLoader Service Provider Interface} (SPI)
+ * to resolve codec names.
+ *
+ * <p>If you implement your own codec, make sure that it has a no-arg constructor so SPI can load
+ * it.
+ *
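+ * <p>A minimal sketch of such a codec (hypothetical name; it delegates everything to an existing
+ * codec via FilterCodec):
+ *
+ * <pre class="prettyprint">
+ * public final class MyCodec extends FilterCodec {
+ *   public MyCodec() { // no-arg constructor, so SPI can instantiate it
+ *     super("MyCodec", Codec.forName("Lucene90"));
+ *   }
+ * }
+ * </pre>
+ *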
  * @see ServiceLoader
  */
 public abstract class Codec implements NamedSPILoader.NamedSPI {
 
   /**
-   * This static holder class prevents classloading deadlock by delaying
-   * init of default codecs and available codecs until needed.
+   * This static holder class prevents classloading deadlock by delaying init of default codecs and
+   * available codecs until needed.
    */
   private static final class Holder {
     private static final NamedSPILoader<Codec> LOADER = new NamedSPILoader<>(Codec.class);
-    
+
     private Holder() {}
-    
+
     static NamedSPILoader<Codec> getLoader() {
       if (LOADER == null) {
-        throw new IllegalStateException("You tried to lookup a Codec by name before all Codecs could be initialized. "+
-          "This likely happens if you call Codec#forName from a Codec's ctor.");
+        throw new IllegalStateException(
+            "You tried to lookup a Codec by name before all Codecs could be initialized. "
+                + "This likely happens if you call Codec#forName from a Codec's ctor.");
       }
       return LOADER;
     }
-    
+
     static Codec defaultCodec = LOADER.lookup("Lucene90");
   }
 
@@ -63,47 +62,48 @@ public abstract class Codec implements NamedSPILoader.NamedSPI {
 
   /**
    * Creates a new codec.
-   * <p>
-   * The provided name will be written into the index segment: in order to
-   * for the segment to be read this class should be registered with Java's
-   * SPI mechanism (registered in META-INF/ of your jar file, etc).
+   *
+   * <p>The provided name will be written into the index segment: in order for the segment to be
+   * read, this class should be registered with Java's SPI mechanism (registered in META-INF/ of
+   * your jar file, etc.).
+   *
    * @param name must be all ascii alphanumeric, and less than 128 characters in length.
    */
   protected Codec(String name) {
     NamedSPILoader.checkServiceName(name);
     this.name = name;
   }
-  
+
   /** Returns this codec's name */
   @Override
   public final String getName() {
     return name;
   }
-  
+
   /** Encodes/decodes postings */
   public abstract PostingsFormat postingsFormat();
 
   /** Encodes/decodes docvalues */
   public abstract DocValuesFormat docValuesFormat();
-  
+
   /** Encodes/decodes stored fields */
   public abstract StoredFieldsFormat storedFieldsFormat();
-  
+
   /** Encodes/decodes term vectors */
   public abstract TermVectorsFormat termVectorsFormat();
-  
+
   /** Encodes/decodes field infos file */
   public abstract FieldInfosFormat fieldInfosFormat();
-  
+
   /** Encodes/decodes segment info file */
   public abstract SegmentInfoFormat segmentInfoFormat();
-  
+
   /** Encodes/decodes document normalization values */
   public abstract NormsFormat normsFormat();
 
   /** Encodes/decodes live docs */
   public abstract LiveDocsFormat liveDocsFormat();
-  
+
   /** Encodes/decodes compound files */
   public abstract CompoundFormat compoundFormat();
 
@@ -112,53 +112,47 @@ public abstract class Codec implements NamedSPILoader.NamedSPI {
 
   /** Encodes/decodes numeric vector fields */
   public abstract VectorFormat vectorFormat();
-  
+
   /** looks up a codec by name */
   public static Codec forName(String name) {
     return Holder.getLoader().lookup(name);
   }
-  
+
   /** returns a list of all available codec names */
   public static Set<String> availableCodecs() {
     return Holder.getLoader().availableServices();
   }
-  
-  /** 
-   * Reloads the codec list from the given {@link ClassLoader}.
-   * Changes to the codecs are visible after the method ends, all
-   * iterators ({@link #availableCodecs()},...) stay consistent. 
-   * 
-   * <p><b>NOTE:</b> Only new codecs are added, existing ones are
-   * never removed or replaced.
-   * 
-   * <p><em>This method is expensive and should only be called for discovery
-   * of new codecs on the given classpath/classloader!</em>
+
+  /**
+   * Reloads the codec list from the given {@link ClassLoader}. Changes to the codecs are visible
+   * after the method ends; all iterators ({@link #availableCodecs()},...) stay consistent.
+   *
+   * <p><b>NOTE:</b> Only new codecs are added, existing ones are never removed or replaced.
+   *
+   * <p><em>This method is expensive and should only be called for discovery of new codecs on the
+   * given classpath/classloader!</em>
    */
   public static void reloadCodecs(ClassLoader classloader) {
     Holder.getLoader().reload(classloader);
   }
-    
-  /** expert: returns the default codec used for newly created
-   *  {@link IndexWriterConfig}s.
-   */
+
+  /** expert: returns the default codec used for newly created {@link IndexWriterConfig}s. */
   public static Codec getDefault() {
     if (Holder.defaultCodec == null) {
-      throw new IllegalStateException("You tried to lookup the default Codec before all Codecs could be initialized. "+
-        "This likely happens if you try to get it from a Codec's ctor.");
+      throw new IllegalStateException(
+          "You tried to lookup the default Codec before all Codecs could be initialized. "
+              + "This likely happens if you try to get it from a Codec's ctor.");
     }
     return Holder.defaultCodec;
   }
-  
-  /** expert: sets the default codec used for newly created
-   *  {@link IndexWriterConfig}s.
-   */
+
+  /** expert: sets the default codec used for newly created {@link IndexWriterConfig}s. */
   public static void setDefault(Codec codec) {
     Holder.defaultCodec = Objects.requireNonNull(codec);
   }
 
   /**
-   * returns the codec's name. Subclasses can override to provide
-   * more detail (such as parameters).
+   * returns the codec's name. Subclasses can override to provide more detail (such as parameters).
    */
   @Override
   public String toString() {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java
index 8c40e2a..3dc46de 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/CodecUtil.java
@@ -16,11 +16,9 @@
  */
 package org.apache.lucene.codecs;
 
-
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
-
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
@@ -36,97 +34,91 @@ import org.apache.lucene.util.StringHelper;
 
 /**
  * Utility class for reading and writing versioned headers.
- * <p>
- * Writing codec headers is useful to ensure that a file is in 
- * the format you think it is.
- * 
+ *
+ * <p>Writing codec headers is useful to ensure that a file is in the format you think it is.
+ *
  * @lucene.experimental
  */
-
 public final class CodecUtil {
   private CodecUtil() {} // no instance
 
-  /**
-   * Constant to identify the start of a codec header.
-   */
-  public final static int CODEC_MAGIC = 0x3fd76c17;
-  /**
-   * Constant to identify the start of a codec footer.
-   */
-  public final static int FOOTER_MAGIC = ~CODEC_MAGIC;
+  /** Constant to identify the start of a codec header. */
+  public static final int CODEC_MAGIC = 0x3fd76c17;
+  /** Constant to identify the start of a codec footer. */
+  public static final int FOOTER_MAGIC = ~CODEC_MAGIC;
 
   /**
-   * Writes a codec header, which records both a string to
-   * identify the file and a version number. This header can
-   * be parsed and validated with 
-   * {@link #checkHeader(DataInput, String, int, int) checkHeader()}.
-   * <p>
-   * CodecHeader --&gt; Magic,CodecName,Version
+   * Writes a codec header, which records both a string to identify the file and a version number.
+   * This header can be parsed and validated with {@link #checkHeader(DataInput, String, int, int)
+   * checkHeader()}.
+   *
+   * <p>CodecHeader --&gt; Magic,CodecName,Version
+   *
    * <ul>
-   *    <li>Magic --&gt; {@link DataOutput#writeInt Uint32}. This
-   *        identifies the start of the header. It is always {@value #CODEC_MAGIC}.
-   *    <li>CodecName --&gt; {@link DataOutput#writeString String}. This
-   *        is a string to identify this file.
-   *    <li>Version --&gt; {@link DataOutput#writeInt Uint32}. Records
-   *        the version of the file.
+   *   <li>Magic --&gt; {@link DataOutput#writeInt Uint32}. This identifies the start of the header.
+   *       It is always {@value #CODEC_MAGIC}.
+   *   <li>CodecName --&gt; {@link DataOutput#writeString String}. This is a string to identify this
+   *       file.
+   *   <li>Version --&gt; {@link DataOutput#writeInt Uint32}. Records the version of the file.
    * </ul>
-   * <p>
-   * Note that the length of a codec header depends only upon the
-   * name of the codec, so this length can be computed at any time
-   * with {@link #headerLength(String)}.
-   * 
+   *
+   * <p>Note that the length of a codec header depends only upon the name of the codec, so this
+   * length can be computed at any time with {@link #headerLength(String)}.
+   *
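+   * <p>For instance (a sketch; "MyFormat" is a hypothetical codec name):
+   *
+   * <pre class="prettyprint">
+   * ByteBuffersDataOutput out = new ByteBuffersDataOutput();
+   * CodecUtil.writeHeader(out, "MyFormat", 1);
+   * assert out.size() == CodecUtil.headerLength("MyFormat"); // fixed length per codec name
+   * </pre>
+   *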
    * @param out Output stream
-   * @param codec String to identify this file. It should be simple ASCII, 
-   *              less than 128 characters in length.
+   * @param codec String to identify this file. It should be simple ASCII, less than 128 characters
+   *     in length.
    * @param version Version number
    * @throws IOException If there is an I/O error writing to the underlying medium.
-   * @throws IllegalArgumentException If the codec name is not simple ASCII, or is more than 127 characters in length
+   * @throws IllegalArgumentException If the codec name is not simple ASCII, or is more than 127
+   *     characters in length
    */
   public static void writeHeader(DataOutput out, String codec, int version) throws IOException {
     BytesRef bytes = new BytesRef(codec);
     if (bytes.length != codec.length() || bytes.length >= 128) {
-      throw new IllegalArgumentException("codec must be simple ASCII, less than 128 characters in length [got " + codec + "]");
+      throw new IllegalArgumentException(
+          "codec must be simple ASCII, less than 128 characters in length [got " + codec + "]");
     }
     out.writeInt(CODEC_MAGIC);
     out.writeString(codec);
     out.writeInt(version);
   }
-  
+
   /**
-   * Writes a codec header for an index file, which records both a string to
-   * identify the format of the file, a version number, and data to identify
-   * the file instance (ID and auxiliary suffix such as generation).
-   * <p>
-   * This header can be parsed and validated with 
-   * {@link #checkIndexHeader(DataInput, String, int, int, byte[], String) checkIndexHeader()}.
-   * <p>
-   * IndexHeader --&gt; CodecHeader,ObjectID,ObjectSuffix
+   * Writes a codec header for an index file, which records both a string to identify the format of
+   * the file, a version number, and data to identify the file instance (ID and auxiliary suffix
+   * such as generation).
+   *
+   * <p>This header can be parsed and validated with {@link #checkIndexHeader(DataInput, String,
+   * int, int, byte[], String) checkIndexHeader()}.
+   *
+   * <p>IndexHeader --&gt; CodecHeader,ObjectID,ObjectSuffix
+   *
    * <ul>
-   *    <li>CodecHeader   --&gt; {@link #writeHeader}
-   *    <li>ObjectID     --&gt; {@link DataOutput#writeByte byte}<sup>16</sup>
-   *    <li>ObjectSuffix --&gt; SuffixLength,SuffixBytes
-   *    <li>SuffixLength  --&gt; {@link DataOutput#writeByte byte}
-   *    <li>SuffixBytes   --&gt; {@link DataOutput#writeByte byte}<sup>SuffixLength</sup>
+   *   <li>CodecHeader --&gt; {@link #writeHeader}
+   *   <li>ObjectID --&gt; {@link DataOutput#writeByte byte}<sup>16</sup>
+   *   <li>ObjectSuffix --&gt; SuffixLength,SuffixBytes
+   *   <li>SuffixLength --&gt; {@link DataOutput#writeByte byte}
+   *   <li>SuffixBytes --&gt; {@link DataOutput#writeByte byte}<sup>SuffixLength</sup>
    * </ul>
-   * <p>
-   * Note that the length of an index header depends only upon the
-   * name of the codec and suffix, so this length can be computed at any time
-   * with {@link #indexHeaderLength(String,String)}.
-   * 
+   *
+   * <p>Note that the length of an index header depends only upon the name of the codec and suffix,
+   * so this length can be computed at any time with {@link #indexHeaderLength(String,String)}.
+   *
    * @param out Output stream
-   * @param codec String to identify the format of this file. It should be simple ASCII, 
-   *              less than 128 characters in length.
+   * @param codec String to identify the format of this file. It should be simple ASCII, less than
+   *     128 characters in length.
    * @param id Unique identifier for this particular file instance.
-   * @param suffix auxiliary suffix information for the file. It should be simple ASCII,
-   *              less than 256 characters in length.
+   * @param suffix auxiliary suffix information for the file. It should be simple ASCII, less than
+   *     256 characters in length.
    * @param version Version number
    * @throws IOException If there is an I/O error writing to the underlying medium.
-   * @throws IllegalArgumentException If the codec name is not simple ASCII, or 
-   *         is more than 127 characters in length, or if id is invalid,
-   *         or if the suffix is not simple ASCII, or more than 255 characters
-   *         in length.
+   * @throws IllegalArgumentException If the codec name is not simple ASCII, or is more than 127
+   *     characters in length, or if id is invalid, or if the suffix is not simple ASCII, or more
+   *     than 255 characters in length.
    */
-  public static void writeIndexHeader(DataOutput out, String codec, int version, byte[] id, String suffix) throws IOException {
+  public static void writeIndexHeader(
+      DataOutput out, String codec, int version, byte[] id, String suffix) throws IOException {
     if (id.length != StringHelper.ID_LENGTH) {
       throw new IllegalArgumentException("Invalid id: " + StringHelper.idToString(id));
     }
@@ -134,7 +126,8 @@ public final class CodecUtil {
     out.writeBytes(id, 0, id.length);
     BytesRef suffixBytes = new BytesRef(suffix);
     if (suffixBytes.length != suffix.length() || suffixBytes.length >= 256) {
-      throw new IllegalArgumentException("suffix must be simple ASCII, less than 256 characters in length [got " + suffix + "]");
+      throw new IllegalArgumentException(
+          "suffix must be simple ASCII, less than 256 characters in length [got " + suffix + "]");
     }
     out.writeByte((byte) suffixBytes.length);
     out.writeBytes(suffixBytes.bytes, suffixBytes.offset, suffixBytes.length);
@@ -142,18 +135,18 @@ public final class CodecUtil {
 
   /**
    * Computes the length of a codec header.
-   * 
+   *
    * @param codec Codec name.
    * @return length of the entire codec header.
    * @see #writeHeader(DataOutput, String, int)
    */
   public static int headerLength(String codec) {
-    return 9+codec.length();
+    return 9 + codec.length();
   }
-  
+
   /**
    * Computes the length of an index header.
-   * 
+   *
    * @param codec Codec name.
    * @return length of the entire index header.
    * @see #writeIndexHeader(DataOutput, String, int, byte[], String)
@@ -163,49 +156,53 @@ public final class CodecUtil {
   }
 
   /**
-   * Reads and validates a header previously written with 
-   * {@link #writeHeader(DataOutput, String, int)}.
-   * <p>
-   * When reading a file, supply the expected <code>codec</code> and
-   * an expected version range (<code>minVersion to maxVersion</code>).
-   * 
-   * @param in Input stream, positioned at the point where the
-   *        header was previously written. Typically this is located
-   *        at the beginning of the file.
+   * Reads and validates a header previously written with {@link #writeHeader(DataOutput, String,
+   * int)}.
+   *
+   * <p>When reading a file, supply the expected <code>codec</code> and an expected version range (
+   * <code>minVersion to maxVersion</code>).
+   *
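+   * <p>For instance (a sketch; the file and format names are hypothetical):
+   *
+   * <pre class="prettyprint">
+   * try (ChecksumIndexInput in = dir.openChecksumInput("_0.myfmt", IOContext.READ)) {
+   *   int version = CodecUtil.checkHeader(in, "MyFormat", 1, 2);
+   *   // ... read the rest of the file ...
+   * }
+   * </pre>
+   *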
+   * @param in Input stream, positioned at the point where the header was previously written.
+   *     Typically this is located at the beginning of the file.
    * @param codec The expected codec name.
    * @param minVersion The minimum supported expected version number.
    * @param maxVersion The maximum supported expected version number.
-   * @return The actual version found, when a valid header is found 
-   *         that matches <code>codec</code>, with an actual version 
-   *         where {@code minVersion <= actual <= maxVersion}.
-   *         Otherwise an exception is thrown.
-   * @throws CorruptIndexException If the first four bytes are not
-   *         {@link #CODEC_MAGIC}, or if the actual codec found is
-   *         not <code>codec</code>.
-   * @throws IndexFormatTooOldException If the actual version is less 
-   *         than <code>minVersion</code>.
-   * @throws IndexFormatTooNewException If the actual version is greater 
-   *         than <code>maxVersion</code>.
+   * @return The actual version found, when a valid header is found that matches <code>codec</code>,
+   *     with an actual version where {@code minVersion <= actual <= maxVersion}. Otherwise an
+   *     exception is thrown.
+   * @throws CorruptIndexException If the first four bytes are not {@link #CODEC_MAGIC}, or if the
+   *     actual codec found is not <code>codec</code>.
+   * @throws IndexFormatTooOldException If the actual version is less than <code>minVersion</code>.
+   * @throws IndexFormatTooNewException If the actual version is greater than <code>maxVersion
+   *     </code>.
    * @throws IOException If there is an I/O error reading from the underlying medium.
    * @see #writeHeader(DataOutput, String, int)
    */
-  public static int checkHeader(DataInput in, String codec, int minVersion, int maxVersion) throws IOException {
+  public static int checkHeader(DataInput in, String codec, int minVersion, int maxVersion)
+      throws IOException {
     // Safety to guard against reading a bogus string:
     final int actualHeader = in.readInt();
     if (actualHeader != CODEC_MAGIC) {
-      throw new CorruptIndexException("codec header mismatch: actual header=" + actualHeader + " vs expected header=" + CODEC_MAGIC, in);
+      throw new CorruptIndexException(
+          "codec header mismatch: actual header="
+              + actualHeader
+              + " vs expected header="
+              + CODEC_MAGIC,
+          in);
     }
     return checkHeaderNoMagic(in, codec, minVersion, maxVersion);
   }
 
-  /** Like {@link
-   *  #checkHeader(DataInput,String,int,int)} except this
-   *  version assumes the first int has already been read
-   *  and validated from the input. */
-  public static int checkHeaderNoMagic(DataInput in, String codec, int minVersion, int maxVersion) throws IOException {
+  /**
+   * Like {@link #checkHeader(DataInput,String,int,int)} except this version assumes the first int
+   * has already been read and validated from the input.
+   */
+  public static int checkHeaderNoMagic(DataInput in, String codec, int minVersion, int maxVersion)
+      throws IOException {
     final String actualCodec = in.readString();
     if (!actualCodec.equals(codec)) {
-      throw new CorruptIndexException("codec mismatch: actual codec=" + actualCodec + " vs expected codec=" + codec, in);
+      throw new CorruptIndexException(
+          "codec mismatch: actual codec=" + actualCodec + " vs expected codec=" + codec, in);
     }
 
     final int actualVersion = in.readInt();
@@ -218,40 +215,41 @@ public final class CodecUtil {
 
     return actualVersion;
   }
-  
+
   /**
-   * Reads and validates a header previously written with 
-   * {@link #writeIndexHeader(DataOutput, String, int, byte[], String)}.
-   * <p>
-   * When reading a file, supply the expected <code>codec</code>,
-   * expected version range (<code>minVersion to maxVersion</code>),
-   * and object ID and suffix.
-   * 
-   * @param in Input stream, positioned at the point where the
-   *        header was previously written. Typically this is located
-   *        at the beginning of the file.
+   * Reads and validates a header previously written with {@link #writeIndexHeader(DataOutput,
+   * String, int, byte[], String)}.
+   *
+   * <p>When reading a file, supply the expected <code>codec</code>, expected version range (<code>
+   * minVersion to maxVersion</code>), and object ID and suffix.
+   *
+   * @param in Input stream, positioned at the point where the header was previously written.
+   *     Typically this is located at the beginning of the file.
    * @param codec The expected codec name.
    * @param minVersion The minimum supported expected version number.
    * @param maxVersion The maximum supported expected version number.
    * @param expectedID The expected object identifier for this file.
    * @param expectedSuffix The expected auxiliary suffix for this file.
-   * @return The actual version found, when a valid header is found 
-   *         that matches <code>codec</code>, with an actual version 
-   *         where {@code minVersion <= actual <= maxVersion}, 
-   *         and matching <code>expectedID</code> and <code>expectedSuffix</code>
-   *         Otherwise an exception is thrown.
-   * @throws CorruptIndexException If the first four bytes are not
-   *         {@link #CODEC_MAGIC}, or if the actual codec found is
-   *         not <code>codec</code>, or if the <code>expectedID</code>
-   *         or <code>expectedSuffix</code> do not match.
-   * @throws IndexFormatTooOldException If the actual version is less 
-   *         than <code>minVersion</code>.
-   * @throws IndexFormatTooNewException If the actual version is greater 
-   *         than <code>maxVersion</code>.
+   * @return The actual version found, when a valid header is found that matches <code>codec</code>,
+   *     with an actual version where {@code minVersion <= actual <= maxVersion}, and matching
+   *     <code>expectedID</code> and <code>expectedSuffix</code>. Otherwise an exception is thrown.
+   * @throws CorruptIndexException If the first four bytes are not {@link #CODEC_MAGIC}, or if the
+   *     actual codec found is not <code>codec</code>, or if the <code>expectedID</code> or <code>
+   *     expectedSuffix</code> do not match.
+   * @throws IndexFormatTooOldException If the actual version is less than <code>minVersion</code>.
+   * @throws IndexFormatTooNewException If the actual version is greater than <code>maxVersion
+   *     </code>.
    * @throws IOException If there is an I/O error reading from the underlying medium.
    * @see #writeIndexHeader(DataOutput, String, int, byte[],String)
    */
-  public static int checkIndexHeader(DataInput in, String codec, int minVersion, int maxVersion, byte[] expectedID, String expectedSuffix) throws IOException {
+  public static int checkIndexHeader(
+      DataInput in,
+      String codec,
+      int minVersion,
+      int maxVersion,
+      byte[] expectedID,
+      String expectedSuffix)
+      throws IOException {
     int version = checkHeader(in, codec, minVersion, maxVersion);
     checkIndexHeaderID(in, expectedID);
     checkIndexHeaderSuffix(in, expectedSuffix);
@@ -259,32 +257,38 @@ public final class CodecUtil {
   }
 
   /**
-   * Expert: verifies the incoming {@link IndexInput} has an index header
-   * and that its segment ID matches the expected one, and then copies
-   * that index header into the provided {@link DataOutput}.  This is
-   * useful when building compound files.
+   * Expert: verifies the incoming {@link IndexInput} has an index header and that its segment ID
+   * matches the expected one, and then copies that index header into the provided {@link
+   * DataOutput}. This is useful when building compound files.
    *
-   * @param in Input stream, positioned at the point where the
-   *        index header was previously written. Typically this is located
-   *        at the beginning of the file.
+   * @param in Input stream, positioned at the point where the index header was previously written.
+   *     Typically this is located at the beginning of the file.
    * @param out Output stream, where the header will be copied to.
    * @param expectedID Expected segment ID
-   * @throws CorruptIndexException If the first four bytes are not
-   *         {@link #CODEC_MAGIC}, or if the <code>expectedID</code>
-   *         does not match.
+   * @throws CorruptIndexException If the first four bytes are not {@link #CODEC_MAGIC}, or if the
+   *     <code>expectedID</code> does not match.
    * @throws IOException If there is an I/O error reading from the underlying medium.
-   *
-   * @lucene.internal 
+   * @lucene.internal
    */
-  public static void verifyAndCopyIndexHeader(IndexInput in, DataOutput out, byte[] expectedID) throws IOException {
+  public static void verifyAndCopyIndexHeader(IndexInput in, DataOutput out, byte[] expectedID)
+      throws IOException {
     // make sure it's large enough to have a header and footer
     if (in.length() < footerLength() + headerLength("")) {
-      throw new CorruptIndexException("compound sub-files must have a valid codec header and footer: file is too small (" + in.length() + " bytes)", in);
+      throw new CorruptIndexException(
+          "compound sub-files must have a valid codec header and footer: file is too small ("
+              + in.length()
+              + " bytes)",
+          in);
     }
 
     int actualHeader = in.readInt();
     if (actualHeader != CODEC_MAGIC) {
-      throw new CorruptIndexException("compound sub-files must have a valid codec header and footer: codec header mismatch: actual header=" + actualHeader + " vs expected header=" + CodecUtil.CODEC_MAGIC, in);
+      throw new CorruptIndexException(
+          "compound sub-files must have a valid codec header and footer: codec header mismatch: actual header="
+              + actualHeader
+              + " vs expected header="
+              + CodecUtil.CODEC_MAGIC,
+          in);
     }
 
     // we can't verify these, so we pass-through:
@@ -308,15 +312,20 @@ public final class CodecUtil {
     out.writeBytes(suffixBytes, 0, suffixLength);
   }
 
-
-  /** Retrieves the full index header from the provided {@link IndexInput}.
-   *  This throws {@link CorruptIndexException} if this file does
-   * not appear to be an index file. */
+  /**
+   * Retrieves the full index header from the provided {@link IndexInput}. This throws {@link
+   * CorruptIndexException} if this file does not appear to be an index file.
+   */
   public static byte[] readIndexHeader(IndexInput in) throws IOException {
     in.seek(0);
     final int actualHeader = in.readInt();
     if (actualHeader != CODEC_MAGIC) {
-      throw new CorruptIndexException("codec header mismatch: actual header=" + actualHeader + " vs expected header=" + CODEC_MAGIC, in);
+      throw new CorruptIndexException(
+          "codec header mismatch: actual header="
+              + actualHeader
+              + " vs expected header="
+              + CODEC_MAGIC,
+          in);
     }
     String codec = in.readString();
     in.readInt();
@@ -328,11 +337,18 @@ public final class CodecUtil {
     return bytes;
   }
 
-  /** Retrieves the full footer from the provided {@link IndexInput}.  This throws
-   *  {@link CorruptIndexException} if this file does not have a valid footer. */
+  /**
+   * Retrieves the full footer from the provided {@link IndexInput}. This throws {@link
+   * CorruptIndexException} if this file does not have a valid footer.
+   */
   public static byte[] readFooter(IndexInput in) throws IOException {
     if (in.length() < footerLength()) {
-      throw new CorruptIndexException("misplaced codec footer (file truncated?): length=" + in.length() + " but footerLength==" + footerLength(), in);
+      throw new CorruptIndexException(
+          "misplaced codec footer (file truncated?): length="
+              + in.length()
+              + " but footerLength=="
+              + footerLength(),
+          in);
     }
     in.seek(in.length() - footerLength());
     validateFooter(in);
@@ -341,49 +357,51 @@ public final class CodecUtil {
     in.readBytes(bytes, 0, bytes.length);
     return bytes;
   }
-  
+
   /** Expert: just reads and verifies the object ID of an index header */
   public static byte[] checkIndexHeaderID(DataInput in, byte[] expectedID) throws IOException {
     byte id[] = new byte[StringHelper.ID_LENGTH];
     in.readBytes(id, 0, id.length);
     if (!Arrays.equals(id, expectedID)) {
-      throw new CorruptIndexException("file mismatch, expected id=" + StringHelper.idToString(expectedID) 
-                                                         + ", got=" + StringHelper.idToString(id), in);
+      throw new CorruptIndexException(
+          "file mismatch, expected id="
+              + StringHelper.idToString(expectedID)
+              + ", got="
+              + StringHelper.idToString(id),
+          in);
     }
     return id;
   }
-  
+
   /** Expert: just reads and verifies the suffix of an index header */
-  public static String checkIndexHeaderSuffix(DataInput in, String expectedSuffix) throws IOException {
+  public static String checkIndexHeaderSuffix(DataInput in, String expectedSuffix)
+      throws IOException {
     int suffixLength = in.readByte() & 0xFF;
     byte suffixBytes[] = new byte[suffixLength];
     in.readBytes(suffixBytes, 0, suffixBytes.length);
     String suffix = new String(suffixBytes, 0, suffixBytes.length, StandardCharsets.UTF_8);
     if (!suffix.equals(expectedSuffix)) {
-      throw new CorruptIndexException("file mismatch, expected suffix=" + expectedSuffix
-                                                             + ", got=" + suffix, in);
+      throw new CorruptIndexException(
+          "file mismatch, expected suffix=" + expectedSuffix + ", got=" + suffix, in);
     }
     return suffix;
   }
-  
+
   /**
-   * Writes a codec footer, which records both a checksum
-   * algorithm ID and a checksum. This footer can
-   * be parsed and validated with 
-   * {@link #checkFooter(ChecksumIndexInput) checkFooter()}.
-   * <p>
-   * CodecFooter --&gt; Magic,AlgorithmID,Checksum
+   * Writes a codec footer, which records both a checksum algorithm ID and a checksum. This footer
+   * can be parsed and validated with {@link #checkFooter(ChecksumIndexInput) checkFooter()}.
+   *
+   * <p>CodecFooter --&gt; Magic,AlgorithmID,Checksum
+   *
    * <ul>
-   *    <li>Magic --&gt; {@link DataOutput#writeInt Uint32}. This
-   *        identifies the start of the footer. It is always {@value #FOOTER_MAGIC}.
-   *    <li>AlgorithmID --&gt; {@link DataOutput#writeInt Uint32}. This
-   *        indicates the checksum algorithm used. Currently this is always 0,
-   *        for zlib-crc32.
-   *    <li>Checksum --&gt; {@link DataOutput#writeLong Uint64}. The
-   *        actual checksum value for all previous bytes in the stream, including
-   *        the bytes from Magic and AlgorithmID.
+   *   <li>Magic --&gt; {@link DataOutput#writeInt Uint32}. This identifies the start of the footer.
+   *       It is always {@value #FOOTER_MAGIC}.
+   *   <li>AlgorithmID --&gt; {@link DataOutput#writeInt Uint32}. This indicates the checksum
+   *       algorithm used. Currently this is always 0, for zlib-crc32.
+   *   <li>Checksum --&gt; {@link DataOutput#writeLong Uint64}. The actual checksum value for all
+   *       previous bytes in the stream, including the bytes from Magic and AlgorithmID.
    * </ul>
-   * 
+   *
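+   * <p>A typical write path ends with the footer (a sketch; the file name is hypothetical):
+   *
+   * <pre class="prettyprint">
+   * try (IndexOutput out = dir.createOutput("_0.myfmt", IOContext.DEFAULT)) {
+   *   CodecUtil.writeHeader(out, "MyFormat", 1);
+   *   // ... write the file contents ...
+   *   CodecUtil.writeFooter(out);
+   * }
+   * </pre>
+   *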
    * @param out Output stream
    * @throws IOException If there is an I/O error writing to the underlying medium.
    */
@@ -392,49 +410,54 @@ public final class CodecUtil {
     out.writeInt(0);
     writeCRC(out);
   }
-  
+
   /**
    * Computes the length of a codec footer.
-   * 
+   *
    * @return length of the entire codec footer.
    * @see #writeFooter(IndexOutput)
    */
   public static int footerLength() {
     return 16;
   }
-  
-  /** 
-   * Validates the codec footer previously written by {@link #writeFooter}. 
+
+  /**
+   * Validates the codec footer previously written by {@link #writeFooter}.
+   *
    * @return actual checksum value
-   * @throws IOException if the footer is invalid, if the checksum does not match, 
-   *                     or if {@code in} is not properly positioned before the footer
-   *                     at the end of the stream.
+   * @throws IOException if the footer is invalid, if the checksum does not match, or if {@code in}
+   *     is not properly positioned before the footer at the end of the stream.
    */
   public static long checkFooter(ChecksumIndexInput in) throws IOException {
     validateFooter(in);
     long actualChecksum = in.getChecksum();
     long expectedChecksum = readCRC(in);
     if (expectedChecksum != actualChecksum) {
-      throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + Long.toHexString(expectedChecksum) +  
-                                                       " actual=" + Long.toHexString(actualChecksum), in);
+      throw new CorruptIndexException(
+          "checksum failed (hardware problem?) : expected="
+              + Long.toHexString(expectedChecksum)
+              + " actual="
+              + Long.toHexString(actualChecksum),
+          in);
     }
     return actualChecksum;
   }
-  
-  /** 
-   * Validates the codec footer previously written by {@link #writeFooter}, optionally
-   * passing an unexpected exception that has already occurred.
-   * <p>
-   * When a {@code priorException} is provided, this method will add a suppressed exception 
-   * indicating whether the checksum for the stream passes, fails, or cannot be computed, and 
+
+  /**
+   * Validates the codec footer previously written by {@link #writeFooter}, optionally passing an
+   * unexpected exception that has already occurred.
+   *
+   * <p>When a {@code priorException} is provided, this method will add a suppressed exception
+   * indicating whether the checksum for the stream passes, fails, or cannot be computed, and
    * rethrow it. Otherwise it behaves the same as {@link #checkFooter(ChecksumIndexInput)}.
-   * <p>
-   * Example usage:
+   *
+   * <p>Example usage:
+   *
    * <pre class="prettyprint">
    * try (ChecksumIndexInput input = ...) {
    *   Throwable priorE = null;
    *   try {
-   *     // ... read a bunch of stuff ... 
+   *     // ... read a bunch of stuff ...
    *   } catch (Throwable exception) {
    *     priorE = exception;
    *   } finally {
@@ -443,7 +466,8 @@ public final class CodecUtil {
    * }
    * </pre>
    */
-  public static void checkFooter(ChecksumIndexInput in, Throwable priorException) throws IOException {
+  public static void checkFooter(ChecksumIndexInput in, Throwable priorException)
+      throws IOException {
     if (priorException == null) {
       checkFooter(in);
     } else {
@@ -455,44 +479,60 @@ public final class CodecUtil {
         long remaining = in.length() - in.getFilePointer();
         if (remaining < footerLength()) {
           // corruption caused us to read into the checksum footer already: we can't proceed
-          throw new CorruptIndexException("checksum status indeterminate: remaining=" + remaining +
-                                          "; please run checkindex for more details", in);
+          throw new CorruptIndexException(
+              "checksum status indeterminate: remaining="
+                  + remaining
+                  + "; please run checkindex for more details",
+              in);
         } else {
           // otherwise, skip any unread bytes.
           in.skipBytes(remaining - footerLength());
-          
+
           // now check the footer
           long checksum = checkFooter(in);
-          priorException.addSuppressed(new CorruptIndexException("checksum passed (" + Long.toHexString(checksum) +
-                                                                 "). possibly transient resource issue, or a Lucene or JVM bug", in));
+          priorException.addSuppressed(
+              new CorruptIndexException(
+                  "checksum passed ("
+                      + Long.toHexString(checksum)
+                      + "). possibly transient resource issue, or a Lucene or JVM bug",
+                  in));
         }
       } catch (CorruptIndexException corruptException) {
         corruptException.addSuppressed(priorException);
         throw corruptException;
       } catch (Throwable t) {
         // catch-all for things that shouldn't go wrong (e.g. OOM during readInt) but could...
-        priorException.addSuppressed(new CorruptIndexException("checksum status indeterminate: unexpected exception", in, t));
+        priorException.addSuppressed(
+            new CorruptIndexException(
+                "checksum status indeterminate: unexpected exception", in, t));
       }
       throw IOUtils.rethrowAlways(priorException);
     }
   }
-  
-  /** 
+
+  /**
    * Returns (but does not validate) the checksum previously written by {@link #writeFooter}.
+   *
    * @return actual checksum value
    * @throws IOException if the footer is invalid
    */
   public static long retrieveChecksum(IndexInput in) throws IOException {
     if (in.length() < footerLength()) {
-      throw new CorruptIndexException("misplaced codec footer (file truncated?): length=" + in.length() + " but footerLength==" + footerLength(), in);
+      throw new CorruptIndexException(
+          "misplaced codec footer (file truncated?): length="
+              + in.length()
+              + " but footerLength=="
+              + footerLength(),
+          in);
     }
     in.seek(in.length() - footerLength());
     validateFooter(in);
     return readCRC(in);
   }
 
-  /** 
+  /**
    * Returns (but does not validate) the checksum previously written by {@link #writeFooter}.
+   *
    * @return actual checksum value
    * @throws IOException if the footer is invalid
    */
@@ -501,9 +541,11 @@ public final class CodecUtil {
       throw new IllegalArgumentException("expectedLength cannot be less than the footer length");
     }
     if (in.length() < expectedLength) {
-      throw new CorruptIndexException("truncated file: length=" + in.length() + " but expectedLength==" + expectedLength, in);
+      throw new CorruptIndexException(
+          "truncated file: length=" + in.length() + " but expectedLength==" + expectedLength, in);
     } else if (in.length() > expectedLength) {
-      throw new CorruptIndexException("file too long: length=" + in.length() + " but expectedLength==" + expectedLength, in);
+      throw new CorruptIndexException(
+          "file too long: length=" + in.length() + " but expectedLength==" + expectedLength, in);
     }
 
     return retrieveChecksum(in);
@@ -513,27 +555,47 @@ public final class CodecUtil {
     long remaining = in.length() - in.getFilePointer();
     long expected = footerLength();
     if (remaining < expected) {
-      throw new CorruptIndexException("misplaced codec footer (file truncated?): remaining=" + remaining + ", expected=" + expected + ", fp=" + in.getFilePointer(), in);
+      throw new CorruptIndexException(
+          "misplaced codec footer (file truncated?): remaining="
+              + remaining
+              + ", expected="
+              + expected
+              + ", fp="
+              + in.getFilePointer(),
+          in);
     } else if (remaining > expected) {
-      throw new CorruptIndexException("misplaced codec footer (file extended?): remaining=" + remaining + ", expected=" + expected + ", fp=" + in.getFilePointer(), in);
+      throw new CorruptIndexException(
+          "misplaced codec footer (file extended?): remaining="
+              + remaining
+              + ", expected="
+              + expected
+              + ", fp="
+              + in.getFilePointer(),
+          in);
     }
-    
+
     final int magic = in.readInt();
     if (magic != FOOTER_MAGIC) {
-      throw new CorruptIndexException("codec footer mismatch (file truncated?): actual footer=" + magic + " vs expected footer=" + FOOTER_MAGIC, in);
+      throw new CorruptIndexException(
+          "codec footer mismatch (file truncated?): actual footer="
+              + magic
+              + " vs expected footer="
+              + FOOTER_MAGIC,
+          in);
     }
-    
+
     final int algorithmID = in.readInt();
     if (algorithmID != 0) {
-      throw new CorruptIndexException("codec footer mismatch: unknown algorithmID: " + algorithmID, in);
+      throw new CorruptIndexException(
+          "codec footer mismatch: unknown algorithmID: " + algorithmID, in);
     }
   }
-  
-  /** 
-   * Clones the provided input, reads all bytes from the file, and calls {@link #checkFooter} 
-   * <p>
-   * Note that this method may be slow, as it must process the entire file.
-   * If you just need to extract the checksum value, call {@link #retrieveChecksum}.
+
+  /**
+   * Clones the provided input, reads all bytes from the file, and calls {@link #checkFooter}
+   *
+   * <p>Note that this method may be slow, as it must process the entire file. If you just need to
+   * extract the checksum value, call {@link #retrieveChecksum}.
    */
   public static long checksumEntireFile(IndexInput input) throws IOException {
     IndexInput clone = input.clone();
@@ -541,14 +603,20 @@ public final class CodecUtil {
     ChecksumIndexInput in = new BufferedChecksumIndexInput(clone);
     assert in.getFilePointer() == 0;
     if (in.length() < footerLength()) {
-      throw new CorruptIndexException("misplaced codec footer (file truncated?): length=" + in.length() + " but footerLength==" + footerLength(), input);
+      throw new CorruptIndexException(
+          "misplaced codec footer (file truncated?): length="
+              + in.length()
+              + " but footerLength=="
+              + footerLength(),
+          input);
     }
     in.seek(in.length() - footerLength());
     return checkFooter(in);
   }
-  
+
   /**
    * Reads CRC32 value as a 64-bit long from the input.
+   *
    * @throws CorruptIndexException if CRC is formatted incorrectly (wrong bits set)
    * @throws IOException if an i/o error occurs
    */
@@ -559,16 +627,18 @@ public final class CodecUtil {
     }
     return value;
   }
-  
+
   /**
    * Writes CRC32 value as a 64-bit long to the output.
+   *
    * @throws IllegalStateException if CRC is formatted incorrectly (wrong bits set)
    * @throws IOException if an i/o error occurs
    */
   static void writeCRC(IndexOutput output) throws IOException {
     long value = output.getChecksum();
     if ((value & 0xFFFFFFFF00000000L) != 0) {
-      throw new IllegalStateException("Illegal CRC-32 checksum: " + value + " (resource=" + output + ")");
+      throw new IllegalStateException(
+          "Illegal CRC-32 checksum: " + value + " (resource=" + output + ")");
     }
     output.writeLong(value);
   }
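
The try/finally idiom documented on checkFooter above is easiest to see end-to-end. Below is a minimal sketch, not part of this diff: it writes a file with writeHeader/writeFooter and reads it back with the prior-exception pattern. The codec name, file name and payload are illustrative, and the two-argument openChecksumInput matches the Lucene 8.x-era API in this repository.

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

public class CheckFooterDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory()) {
      // Write a header, a small payload, and the checksum footer.
      try (IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
        CodecUtil.writeHeader(out, "demo", 0);
        out.writeInt(42);
        CodecUtil.writeFooter(out);
      }
      // Read it back: collect any exception from the read, then let
      // checkFooter(in, priorE) attach the checksum status and rethrow.
      try (ChecksumIndexInput in = dir.openChecksumInput("demo.bin", IOContext.READONCE)) {
        Throwable priorE = null;
        try {
          CodecUtil.checkHeader(in, "demo", 0, 0);
          System.out.println("payload=" + in.readInt());
        } catch (Throwable exception) {
          priorE = exception;
        } finally {
          CodecUtil.checkFooter(in, priorE);
        }
      }
    }
  }
}
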
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveImpactAccumulator.java b/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveImpactAccumulator.java
index fca6455..247cd17 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveImpactAccumulator.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveImpactAccumulator.java
@@ -24,12 +24,9 @@ import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.TreeSet;
-
 import org.apache.lucene.index.Impact;
 
-/**
- * This class accumulates the (freq, norm) pairs that may produce competitive scores.
- */
+/** This class accumulates the (freq, norm) pairs that may produce competitive scores. */
 public final class CompetitiveImpactAccumulator {
 
   // We speed up accumulation for common norm values with this array that maps
@@ -44,18 +41,19 @@ public final class CompetitiveImpactAccumulator {
   /** Sole constructor. */
   public CompetitiveImpactAccumulator() {
     maxFreqs = new int[256];
-    Comparator<Impact> comparator = new Comparator<Impact>() {
-      @Override
-      public int compare(Impact o1, Impact o2) {
-        // greater freqs compare greater
-        int cmp = Integer.compare(o1.freq, o2.freq);
-        if (cmp == 0) {
-          // greater norms compare lower
-          cmp = Long.compareUnsigned(o2.norm, o1.norm);
-        }
-        return cmp;
-      }
-    };
+    Comparator<Impact> comparator =
+        new Comparator<Impact>() {
+          @Override
+          public int compare(Impact o1, Impact o2) {
+            // greater freqs compare greater
+            int cmp = Integer.compare(o1.freq, o2.freq);
+            if (cmp == 0) {
+              // greater norms compare lower
+              cmp = Long.compareUnsigned(o2.norm, o1.norm);
+            }
+            return cmp;
+          }
+        };
     otherFreqNormPairs = new TreeSet<>(comparator);
   }
 
@@ -66,12 +64,14 @@ public final class CompetitiveImpactAccumulator {
     assert assertConsistent();
   }
 
-  /** Accumulate a (freq,norm) pair, updating this structure if there is no
-   *  equivalent or more competitive entry already. */
+  /**
+   * Accumulate a (freq,norm) pair, updating this structure if there is no equivalent or more
+   * competitive entry already.
+   */
   public void add(int freq, long norm) {
     if (norm >= Byte.MIN_VALUE && norm <= Byte.MAX_VALUE) {
       int index = Byte.toUnsignedInt((byte) norm);
-      maxFreqs[index] = Math.max(maxFreqs[index], freq); 
+      maxFreqs[index] = Math.max(maxFreqs[index], freq);
     } else {
       add(new Impact(freq, norm), otherFreqNormPairs);
     }
@@ -131,7 +131,8 @@ public final class CompetitiveImpactAccumulator {
       freqNormPairs.add(newEntry);
     }
 
-    for (Iterator<Impact> it = freqNormPairs.headSet(newEntry, false).descendingIterator(); it.hasNext(); ) {
+    for (Iterator<Impact> it = freqNormPairs.headSet(newEntry, false).descendingIterator();
+        it.hasNext(); ) {
       Impact entry = it.next();
       if (Long.compareUnsigned(entry.norm, newEntry.norm) >= 0) {
         // less competitive
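
To make the comparator and pruning above concrete: a (freq, norm) pair survives only if no other pair has a freq at least as high together with a norm no higher. A small sketch, not part of this diff; the values are arbitrary, and getCompetitiveFreqNormPairs is assumed to be the public accessor for the surviving pairs.

import java.util.Collection;
import org.apache.lucene.codecs.CompetitiveImpactAccumulator;
import org.apache.lucene.index.Impact;

public class CompetitiveImpactDemo {
  public static void main(String[] args) {
    CompetitiveImpactAccumulator acc = new CompetitiveImpactAccumulator();
    acc.add(3, 10); // competitive
    acc.add(2, 10); // dominated: lower freq at the same norm
    acc.add(5, 12); // competitive: higher freq, despite the higher norm
    // Only non-dominated pairs remain: (3, 10) and (5, 12).
    Collection<Impact> pairs = acc.getCompetitiveFreqNormPairs();
    for (Impact impact : pairs) {
      System.out.println("freq=" + impact.freq + " norm=" + impact.norm);
    }
  }
}
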
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CompoundDirectory.java b/lucene/core/src/java/org/apache/lucene/codecs/CompoundDirectory.java
index f063a12..362b5b3 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/CompoundDirectory.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/CompoundDirectory.java
@@ -18,7 +18,6 @@ package org.apache.lucene.codecs;
 
 import java.io.IOException;
 import java.util.Collection;
-
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
@@ -26,6 +25,7 @@ import org.apache.lucene.store.Lock;
 
 /**
  * A read-only {@link Directory} that consists of a view over a compound file.
+ *
  * @see CompoundFormat
  * @lucene.experimental
  */
@@ -36,29 +36,34 @@ public abstract class CompoundDirectory extends Directory {
 
   /**
    * Checks consistency of this directory.
-   * <p>
-   * Note that this may be costly in terms of I/O, e.g.
-   * may involve computing a checksum value against large data files.
+   *
+   * <p>Note that this may be costly in terms of I/O, e.g. may involve computing a checksum value
+   * against large data files.
    */
   public abstract void checkIntegrity() throws IOException;
 
-  /** Not implemented
-   * @throws UnsupportedOperationException always: not supported by CFS */
+  /**
+   * Not implemented
+   *
+   * @throws UnsupportedOperationException always: not supported by CFS
+   */
   @Override
   public final void deleteFile(String name) {
     throw new UnsupportedOperationException();
   }
-  
-  /** Not implemented
-   * @throws UnsupportedOperationException always: not supported by CFS */
+
+  /**
+   * Not implemented
+   *
+   * @throws UnsupportedOperationException always: not supported by CFS
+   */
   @Override
   public final void rename(String from, String to) {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  public final void syncMetaData() {
-  }
+  public final void syncMetaData() {}
 
   @Override
   public final IndexOutput createOutput(String name, IOContext context) throws IOException {
@@ -66,18 +71,18 @@ public abstract class CompoundDirectory extends Directory {
   }
 
   @Override
-  public final IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException {
+  public final IndexOutput createTempOutput(String prefix, String suffix, IOContext context)
+      throws IOException {
     throw new UnsupportedOperationException();
   }
-  
+
   @Override
   public final void sync(Collection<String> names) {
     throw new UnsupportedOperationException();
   }
-  
+
   @Override
   public final Lock obtainLock(String name) {
     throw new UnsupportedOperationException();
   }
-
 }
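
The reformatted javadoc above spells out the contract: a CompoundDirectory is a read-only view, so every mutating operation fails. A short sketch, not part of this diff; the method and file name are illustrative, and the instance is assumed to come from CompoundFormat#getCompoundReader.

import java.io.IOException;
import org.apache.lucene.codecs.CompoundDirectory;

public class CompoundDirectoryContract {
  static void inspect(CompoundDirectory cfs) throws IOException {
    for (String file : cfs.listAll()) {
      System.out.println(file + " -> " + cfs.fileLength(file) + " bytes");
    }
    // May be costly: can involve checksumming large data files.
    cfs.checkIntegrity();
    try {
      cfs.deleteFile("anything.dat"); // any mutation is rejected
    } catch (UnsupportedOperationException expected) {
      // CFS views are strictly read-only.
    }
  }
}
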
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CompoundFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/CompoundFormat.java
index d74eced..371e192 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/CompoundFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/CompoundFormat.java
@@ -16,15 +16,14 @@
  */
 package org.apache.lucene.codecs;
 
-
 import java.io.IOException;
-
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 
 /**
  * Encodes/decodes compound files
+ *
  * @lucene.experimental
  */
 public abstract class CompoundFormat {
@@ -34,16 +33,15 @@ public abstract class CompoundFormat {
 
   // TODO: this is very minimal. If we need more methods,
   // we can add 'producer' classes.
-  
-  /**
-   * Returns a Directory view (read-only) for the compound files in this segment
-   */
-  public abstract CompoundDirectory getCompoundReader(Directory dir, SegmentInfo si, IOContext context) throws IOException;
+
+  /** Returns a Directory view (read-only) for the compound files in this segment */
+  public abstract CompoundDirectory getCompoundReader(
+      Directory dir, SegmentInfo si, IOContext context) throws IOException;
 
   /**
-   * Packs the provided segment's files into a compound format.  All files referenced
-   * by the provided {@link SegmentInfo} must have {@link CodecUtil#writeIndexHeader}
-   * and {@link CodecUtil#writeFooter}.
+   * Packs the provided segment's files into a compound format. All files referenced by the provided
+   * {@link SegmentInfo} must have {@link CodecUtil#writeIndexHeader} and {@link
+   * CodecUtil#writeFooter}.
    */
   public abstract void write(Directory dir, SegmentInfo si, IOContext context) throws IOException;
 }
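
As a usage sketch for getCompoundReader, again not part of this diff: the inspector below is illustrative, assumes an index path as its first argument, and relies on the codec recorded per segment to open the compound view.

import java.nio.file.Paths;
import org.apache.lucene.codecs.CompoundDirectory;
import org.apache.lucene.codecs.CompoundFormat;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;

public class CfsInspector {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get(args[0]))) {
      SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
      for (SegmentCommitInfo sci : infos) {
        if (sci.info.getUseCompoundFile() == false) {
          continue;
        }
        // Each codec supplies its own CompoundFormat; open the read-only view.
        CompoundFormat format = sci.info.getCodec().compoundFormat();
        try (CompoundDirectory cfs = format.getCompoundReader(dir, sci.info, IOContext.DEFAULT)) {
          for (String file : cfs.listAll()) {
            System.out.println(sci.info.name + " contains " + file);
          }
        }
      }
    }
  }
}
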
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
index c4bae5c..6320f97 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
@@ -16,12 +16,13 @@
  */
 package org.apache.lucene.codecs;
 
+import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
+
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocIDMerger;
 import org.apache.lucene.index.DocValues;
@@ -47,86 +48,88 @@ import org.apache.lucene.util.LongBitSet;
 import org.apache.lucene.util.LongValues;
 import org.apache.lucene.util.packed.PackedInts;
 
-import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
-
-/** 
- * Abstract API that consumes numeric, binary and
- * sorted docvalues.  Concrete implementations of this
- * actually do "something" with the docvalues (write it into
- * the index in a specific format).
- * <p>
- * The lifecycle is:
+/**
+ * Abstract API that consumes numeric, binary and sorted docvalues. Concrete implementations of this
+ * actually do "something" with the docvalues (write them into the index in a specific format).
+ *
+ * <p>The lifecycle is:
+ *
  * <ol>
- *   <li>DocValuesConsumer is created by 
- *       {@link NormsFormat#normsConsumer(SegmentWriteState)}.
- *   <li>{@link #addNumericField}, {@link #addBinaryField},
- *       {@link #addSortedField}, {@link #addSortedSetField},
- *       or {@link #addSortedNumericField} are called for each Numeric,
- *       Binary, Sorted, SortedSet, or SortedNumeric docvalues field. 
- *       The API is a "pull" rather than "push", and the implementation 
- *       is free to iterate over the values multiple times 
- *       ({@link Iterable#iterator()}).
+ *   <li>DocValuesConsumer is created by {@link NormsFormat#normsConsumer(SegmentWriteState)}.
+ *   <li>{@link #addNumericField}, {@link #addBinaryField}, {@link #addSortedField}, {@link
+ *       #addSortedSetField}, or {@link #addSortedNumericField} are called for each Numeric, Binary,
+ *       Sorted, SortedSet, or SortedNumeric docvalues field. The API is a "pull" rather than
+ *       "push", and the implementation is free to iterate over the values multiple times ({@link
+ *       Iterable#iterator()}).
  *   <li>After all fields are added, the consumer is {@link #close}d.
  * </ol>
  *
  * @lucene.experimental
  */
 public abstract class DocValuesConsumer implements Closeable {
-  
-  /** Sole constructor. (For invocation by subclass 
-   *  constructors, typically implicit.) */
+
+  /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
   protected DocValuesConsumer() {}
 
   /**
    * Writes numeric docvalues for a field.
+   *
    * @param field field information
    * @param valuesProducer Numeric values to write.
    * @throws IOException if an I/O error occurred.
    */
-  public abstract void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException;    
+  public abstract void addNumericField(FieldInfo field, DocValuesProducer valuesProducer)
+      throws IOException;
 
   /**
    * Writes binary docvalues for a field.
+   *
    * @param field field information
    * @param valuesProducer Binary values to write.
    * @throws IOException if an I/O error occurred.
    */
-  public abstract void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException;
+  public abstract void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer)
+      throws IOException;
 
   /**
    * Writes pre-sorted binary docvalues for a field.
+   *
    * @param field field information
    * @param valuesProducer produces the values and ordinals to write
    * @throws IOException if an I/O error occurred.
    */
-  public abstract void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException;
-  
+  public abstract void addSortedField(FieldInfo field, DocValuesProducer valuesProducer)
+      throws IOException;
+
   /**
    * Writes pre-sorted numeric docvalues for a field
+   *
    * @param field field information
    * @param valuesProducer produces the values to write
    * @throws IOException if an I/O error occurred.
    */
-  public abstract void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException;
+  public abstract void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer)
+      throws IOException;
 
   /**
    * Writes pre-sorted set docvalues for a field
+   *
    * @param field field information
    * @param valuesProducer produces the values to write
    * @throws IOException if an I/O error occurred.
    */
-  public abstract void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException;
-  
-  /** Merges in the fields from the readers in 
-   *  <code>mergeState</code>. The default implementation 
-   *  calls {@link #mergeNumericField}, {@link #mergeBinaryField},
-   *  {@link #mergeSortedField}, {@link #mergeSortedSetField},
-   *  or {@link #mergeSortedNumericField} for each field,
-   *  depending on its type.
-   *  Implementations can override this method 
-   *  for more sophisticated merging (bulk-byte copying, etc). */
+  public abstract void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer)
+      throws IOException;
+
+  /**
+   * Merges in the fields from the readers in <code>mergeState</code>. The default implementation
+   * calls {@link #mergeNumericField}, {@link #mergeBinaryField}, {@link #mergeSortedField}, {@link
+   * #mergeSortedSetField}, or {@link #mergeSortedNumericField} for each field, depending on its
+   * type. Implementations can override this method for more sophisticated merging (bulk-byte
+   * copying, etc).
+   */
   public void merge(MergeState mergeState) throws IOException {
-    for(DocValuesProducer docValuesProducer : mergeState.docValuesProducers) {
+    for (DocValuesProducer docValuesProducer : mergeState.docValuesProducers) {
       if (docValuesProducer != null) {
         docValuesProducer.checkIntegrity();
       }
@@ -168,88 +171,92 @@ public abstract class DocValuesConsumer implements Closeable {
       return values.nextDoc();
     }
   }
-  
+
   /**
    * Merges the numeric docvalues from <code>MergeState</code>.
-   * <p>
-   * The default implementation calls {@link #addNumericField}, passing
-   * a DocValuesProducer that merges and filters deleted documents on the fly.
+   *
+   * <p>The default implementation calls {@link #addNumericField}, passing a DocValuesProducer that
+   * merges and filters deleted documents on the fly.
    */
-  public void mergeNumericField(final FieldInfo mergeFieldInfo, final MergeState mergeState) throws IOException {
-    addNumericField(mergeFieldInfo,
-                    new EmptyDocValuesProducer() {
-                      @Override
-                      public NumericDocValues getNumeric(FieldInfo fieldInfo) throws IOException {
-                        if (fieldInfo != mergeFieldInfo) {
-                          throw new IllegalArgumentException("wrong fieldInfo");
-                        }
-
-                        List<NumericDocValuesSub> subs = new ArrayList<>();
-                        assert mergeState.docMaps.length == mergeState.docValuesProducers.length;
-                        long cost = 0;
-                        for (int i=0;i<mergeState.docValuesProducers.length;i++) {
-                          NumericDocValues values = null;
-                          DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
-                          if (docValuesProducer != null) {
-                            FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
-                            if (readerFieldInfo != null && readerFieldInfo.getDocValuesType() == DocValuesType.NUMERIC) {
-                              values = docValuesProducer.getNumeric(readerFieldInfo);
-                            }
-                          }
-                          if (values != null) {
-                            cost += values.cost();
-                            subs.add(new NumericDocValuesSub(mergeState.docMaps[i], values));
-                          }
-                        }
-
-                        final DocIDMerger<NumericDocValuesSub> docIDMerger = DocIDMerger.of(subs, mergeState.needsIndexSort);
-
-                        final long finalCost = cost;
-                        
-                        return new NumericDocValues() {
-                          private int docID = -1;
-                          private NumericDocValuesSub current;
-
-                          @Override
-                          public int docID() {
-                            return docID;
-                          }
-
-                          @Override
-                          public int nextDoc() throws IOException {
-                            current = docIDMerger.next();
-                            if (current == null) {
-                              docID = NO_MORE_DOCS;
-                            } else {
-                              docID = current.mappedDocID;
-                            }
-                            return docID;
-                          }
-
-                          @Override
-                          public int advance(int target) throws IOException {
-                            throw new UnsupportedOperationException();
-                          }
-
-                          @Override
-                          public boolean advanceExact(int target) throws IOException {
-                            throw new UnsupportedOperationException();
-                          }
-
-                          @Override
-                          public long cost() {
-                            return finalCost;
-                          }
-
-                          @Override
-                          public long longValue() throws IOException {
-                            return current.values.longValue();
-                          }
-                        };
-                      }
-                    });
+  public void mergeNumericField(final FieldInfo mergeFieldInfo, final MergeState mergeState)
+      throws IOException {
+    addNumericField(
+        mergeFieldInfo,
+        new EmptyDocValuesProducer() {
+          @Override
+          public NumericDocValues getNumeric(FieldInfo fieldInfo) throws IOException {
+            if (fieldInfo != mergeFieldInfo) {
+              throw new IllegalArgumentException("wrong fieldInfo");
+            }
+
+            List<NumericDocValuesSub> subs = new ArrayList<>();
+            assert mergeState.docMaps.length == mergeState.docValuesProducers.length;
+            long cost = 0;
+            for (int i = 0; i < mergeState.docValuesProducers.length; i++) {
+              NumericDocValues values = null;
+              DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
+              if (docValuesProducer != null) {
+                FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
+                if (readerFieldInfo != null
+                    && readerFieldInfo.getDocValuesType() == DocValuesType.NUMERIC) {
+                  values = docValuesProducer.getNumeric(readerFieldInfo);
+                }
+              }
+              if (values != null) {
+                cost += values.cost();
+                subs.add(new NumericDocValuesSub(mergeState.docMaps[i], values));
+              }
+            }
+
+            final DocIDMerger<NumericDocValuesSub> docIDMerger =
+                DocIDMerger.of(subs, mergeState.needsIndexSort);
+
+            final long finalCost = cost;
+
+            return new NumericDocValues() {
+              private int docID = -1;
+              private NumericDocValuesSub current;
+
+              @Override
+              public int docID() {
+                return docID;
+              }
+
+              @Override
+              public int nextDoc() throws IOException {
+                current = docIDMerger.next();
+                if (current == null) {
+                  docID = NO_MORE_DOCS;
+                } else {
+                  docID = current.mappedDocID;
+                }
+                return docID;
+              }
+
+              @Override
+              public int advance(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean advanceExact(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long cost() {
+                return finalCost;
+              }
+
+              @Override
+              public long longValue() throws IOException {
+                return current.values.longValue();
+              }
+            };
+          }
+        });
   }
-  
+
   /** Tracks state of one binary sub-reader that we are merging */
   private static class BinaryDocValuesSub extends DocIDMerger.Sub {
 
@@ -269,82 +276,86 @@ public abstract class DocValuesConsumer implements Closeable {
 
   /**
    * Merges the binary docvalues from <code>MergeState</code>.
-   * <p>
-   * The default implementation calls {@link #addBinaryField}, passing
-   * a DocValuesProducer that merges and filters deleted documents on the fly.
+   *
+   * <p>The default implementation calls {@link #addBinaryField}, passing a DocValuesProducer that
+   * merges and filters deleted documents on the fly.
    */
-  public void mergeBinaryField(FieldInfo mergeFieldInfo, final MergeState mergeState) throws IOException {
-    addBinaryField(mergeFieldInfo,
-                   new EmptyDocValuesProducer() {
-                     @Override
-                     public BinaryDocValues getBinary(FieldInfo fieldInfo) throws IOException {
-                       if (fieldInfo != mergeFieldInfo) {
-                         throw new IllegalArgumentException("wrong fieldInfo");
-                       }
-                   
-                       List<BinaryDocValuesSub> subs = new ArrayList<>();
-
-                       long cost = 0;
-                       for (int i=0;i<mergeState.docValuesProducers.length;i++) {
-                         BinaryDocValues values = null;
-                         DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
-                         if (docValuesProducer != null) {
-                           FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
-                           if (readerFieldInfo != null && readerFieldInfo.getDocValuesType() == DocValuesType.BINARY) {
-                             values = docValuesProducer.getBinary(readerFieldInfo);
-                           }
-                         }
-                         if (values != null) {
-                           cost += values.cost();
-                           subs.add(new BinaryDocValuesSub(mergeState.docMaps[i], values));
-                         }
-                       }
-
-                       final DocIDMerger<BinaryDocValuesSub> docIDMerger = DocIDMerger.of(subs, mergeState.needsIndexSort);
-                       final long finalCost = cost;
-                       
-                       return new BinaryDocValues() {
-                         private BinaryDocValuesSub current;
-                         private int docID = -1;
-
-                         @Override
-                         public int docID() {
-                           return docID;
-                         }
-
-                         @Override
-                         public int nextDoc() throws IOException {
-                           current = docIDMerger.next();
-                           if (current == null) {
-                             docID = NO_MORE_DOCS;
-                           } else {
-                             docID = current.mappedDocID;
-                           }
-                           return docID;
-                         }
-
-                         @Override
-                         public int advance(int target) throws IOException {
-                           throw new UnsupportedOperationException();
-                         }
-
-                         @Override
-                         public boolean advanceExact(int target) throws IOException {
-                           throw new UnsupportedOperationException();
-                         }
-
-                         @Override
-                         public long cost() {
-                           return finalCost;
-                         }
-
-                         @Override
-                         public BytesRef binaryValue() throws IOException {
-                           return current.values.binaryValue();
-                         }
-                       };
-                     }
-                   });
+  public void mergeBinaryField(FieldInfo mergeFieldInfo, final MergeState mergeState)
+      throws IOException {
+    addBinaryField(
+        mergeFieldInfo,
+        new EmptyDocValuesProducer() {
+          @Override
+          public BinaryDocValues getBinary(FieldInfo fieldInfo) throws IOException {
+            if (fieldInfo != mergeFieldInfo) {
+              throw new IllegalArgumentException("wrong fieldInfo");
+            }
+
+            List<BinaryDocValuesSub> subs = new ArrayList<>();
+
+            long cost = 0;
+            for (int i = 0; i < mergeState.docValuesProducers.length; i++) {
+              BinaryDocValues values = null;
+              DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
+              if (docValuesProducer != null) {
+                FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
+                if (readerFieldInfo != null
+                    && readerFieldInfo.getDocValuesType() == DocValuesType.BINARY) {
+                  values = docValuesProducer.getBinary(readerFieldInfo);
+                }
+              }
+              if (values != null) {
+                cost += values.cost();
+                subs.add(new BinaryDocValuesSub(mergeState.docMaps[i], values));
+              }
+            }
+
+            final DocIDMerger<BinaryDocValuesSub> docIDMerger =
+                DocIDMerger.of(subs, mergeState.needsIndexSort);
+            final long finalCost = cost;
+
+            return new BinaryDocValues() {
+              private BinaryDocValuesSub current;
+              private int docID = -1;
+
+              @Override
+              public int docID() {
+                return docID;
+              }
+
+              @Override
+              public int nextDoc() throws IOException {
+                current = docIDMerger.next();
+                if (current == null) {
+                  docID = NO_MORE_DOCS;
+                } else {
+                  docID = current.mappedDocID;
+                }
+                return docID;
+              }
+
+              @Override
+              public int advance(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean advanceExact(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long cost() {
+                return finalCost;
+              }
+
+              @Override
+              public BytesRef binaryValue() throws IOException {
+                return current.values.binaryValue();
+              }
+            };
+          }
+        });
   }
 
   /** Tracks state of one sorted numeric sub-reader that we are merging */
@@ -366,98 +377,102 @@ public abstract class DocValuesConsumer implements Closeable {
 
   /**
    * Merges the sorted numeric docvalues from <code>MergeState</code>.
-   * <p>
-   * The default implementation calls {@link #addSortedNumericField}, passing
-   * iterables that filter deleted documents.
+   *
+   * <p>The default implementation calls {@link #addSortedNumericField}, passing iterables that
+   * filter deleted documents.
    */
-  public void mergeSortedNumericField(FieldInfo mergeFieldInfo, final MergeState mergeState) throws IOException {
-    
-    addSortedNumericField(mergeFieldInfo,
-                          new EmptyDocValuesProducer() {
-                            @Override
-                            public SortedNumericDocValues getSortedNumeric(FieldInfo fieldInfo) throws IOException {
-                              if (fieldInfo != mergeFieldInfo) {
-                                throw new IllegalArgumentException("wrong FieldInfo");
-                              }
-                              
-                              // We must make new iterators + DocIDMerger for each iterator:
-                              List<SortedNumericDocValuesSub> subs = new ArrayList<>();
-                              long cost = 0;
-                              for (int i=0;i<mergeState.docValuesProducers.length;i++) {
-                                DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
-                                SortedNumericDocValues values = null;
-                                if (docValuesProducer != null) {
-                                  FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
-                                  if (readerFieldInfo != null && readerFieldInfo.getDocValuesType() == DocValuesType.SORTED_NUMERIC) {
-                                    values = docValuesProducer.getSortedNumeric(readerFieldInfo);
-                                  }
-                                }
-                                if (values == null) {
-                                  values = DocValues.emptySortedNumeric();
-                                }
-                                cost += values.cost();
-                                subs.add(new SortedNumericDocValuesSub(mergeState.docMaps[i], values));
-                              }
-
-                              final long finalCost = cost;
-
-                              final DocIDMerger<SortedNumericDocValuesSub> docIDMerger = DocIDMerger.of(subs, mergeState.needsIndexSort);
-
-                              return new SortedNumericDocValues() {
-
-                                private int docID = -1;
-                                private SortedNumericDocValuesSub currentSub;
-
-                                @Override
-                                public int docID() {
-                                  return docID;
-                                }
-                                
-                                @Override
-                                public int nextDoc() throws IOException {
-                                  currentSub = docIDMerger.next();
-                                  if (currentSub == null) {
-                                    docID = NO_MORE_DOCS;
-                                  } else {
-                                    docID = currentSub.mappedDocID;
-                                  }
-
-                                  return docID;
-                                }
-
-                                @Override
-                                public int advance(int target) throws IOException {
-                                  throw new UnsupportedOperationException();
-                                }
-
-                                @Override
-                                public boolean advanceExact(int target) throws IOException {
-                                  throw new UnsupportedOperationException();
-                                }
-
-                                @Override
-                                public int docValueCount() {
-                                  return currentSub.values.docValueCount();
-                                }
-
-                                @Override
-                                public long cost() {
-                                  return finalCost;
-                                }
-
-                                @Override
-                                public long nextValue() throws IOException {
-                                  return currentSub.values.nextValue();
-                                }
-                              };
-                            }
-                          });
+  public void mergeSortedNumericField(FieldInfo mergeFieldInfo, final MergeState mergeState)
+      throws IOException {
+
+    addSortedNumericField(
+        mergeFieldInfo,
+        new EmptyDocValuesProducer() {
+          @Override
+          public SortedNumericDocValues getSortedNumeric(FieldInfo fieldInfo) throws IOException {
+            if (fieldInfo != mergeFieldInfo) {
+              throw new IllegalArgumentException("wrong FieldInfo");
+            }
+
+            // We must make new iterators + DocIDMerger for each iterator:
+            List<SortedNumericDocValuesSub> subs = new ArrayList<>();
+            long cost = 0;
+            for (int i = 0; i < mergeState.docValuesProducers.length; i++) {
+              DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
+              SortedNumericDocValues values = null;
+              if (docValuesProducer != null) {
+                FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
+                if (readerFieldInfo != null
+                    && readerFieldInfo.getDocValuesType() == DocValuesType.SORTED_NUMERIC) {
+                  values = docValuesProducer.getSortedNumeric(readerFieldInfo);
+                }
+              }
+              if (values == null) {
+                values = DocValues.emptySortedNumeric();
+              }
+              cost += values.cost();
+              subs.add(new SortedNumericDocValuesSub(mergeState.docMaps[i], values));
+            }
+
+            final long finalCost = cost;
+
+            final DocIDMerger<SortedNumericDocValuesSub> docIDMerger =
+                DocIDMerger.of(subs, mergeState.needsIndexSort);
+
+            return new SortedNumericDocValues() {
+
+              private int docID = -1;
+              private SortedNumericDocValuesSub currentSub;
+
+              @Override
+              public int docID() {
+                return docID;
+              }
+
+              @Override
+              public int nextDoc() throws IOException {
+                currentSub = docIDMerger.next();
+                if (currentSub == null) {
+                  docID = NO_MORE_DOCS;
+                } else {
+                  docID = currentSub.mappedDocID;
+                }
+
+                return docID;
+              }
+
+              @Override
+              public int advance(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean advanceExact(int target) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int docValueCount() {
+                return currentSub.values.docValueCount();
+              }
+
+              @Override
+              public long cost() {
+                return finalCost;
+              }
+
+              @Override
+              public long nextValue() throws IOException {
+                return currentSub.values.nextValue();
+              }
+            };
+          }
+        });
   }
 
   /**
-   * A merged {@link TermsEnum}. This helps avoid relying on the default terms enum,
-   * which calls {@link SortedDocValues#lookupOrd(int)} or
-   * {@link SortedSetDocValues#lookupOrd(long)} on every call to {@link TermsEnum#next()}.
+   * A merged {@link TermsEnum}. This helps avoid relying on the default terms enum, which calls
+   * {@link SortedDocValues#lookupOrd(int)} or {@link SortedSetDocValues#lookupOrd(long)} on every
+   * call to {@link TermsEnum#next()}.
    */
   private static class MergedTermsEnum extends TermsEnum {
 
@@ -547,7 +562,6 @@ public abstract class DocValuesConsumer implements Closeable {
     public TermState termState() throws IOException {
       throw new UnsupportedOperationException();
     }
-
   }
 
   /** Tracks state of one sorted sub-reader that we are merging */
@@ -555,7 +569,7 @@ public abstract class DocValuesConsumer implements Closeable {
 
     final SortedDocValues values;
     final LongValues map;
-    
+
     public SortedDocValuesSub(MergeState.DocMap docMap, SortedDocValues values, LongValues map) {
       super(docMap);
       this.values = values;
@@ -571,13 +585,14 @@ public abstract class DocValuesConsumer implements Closeable {
 
   /**
    * Merges the sorted docvalues from <code>MergeState</code>.
-   * <p>
-   * The default implementation calls {@link #addSortedField}, passing
-   * an Iterable that merges ordinals and values and filters deleted documents .
+   *
+   * <p>The default implementation calls {@link #addSortedField}, passing an Iterable that merges
+   * ordinals and values and filters deleted documents.
    */
-  public void mergeSortedField(FieldInfo fieldInfo, final MergeState mergeState) throws IOException {
+  public void mergeSortedField(FieldInfo fieldInfo, final MergeState mergeState)
+      throws IOException {
     List<SortedDocValues> toMerge = new ArrayList<>();
-    for (int i=0;i<mergeState.docValuesProducers.length;i++) {
+    for (int i = 0; i < mergeState.docValuesProducers.length; i++) {
       SortedDocValues values = null;
       DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
       if (docValuesProducer != null) {
@@ -594,11 +609,11 @@ public abstract class DocValuesConsumer implements Closeable {
 
     final int numReaders = toMerge.size();
     final SortedDocValues dvs[] = toMerge.toArray(new SortedDocValues[numReaders]);
-    
+
     // step 1: iterate thru each sub and mark terms still in use
     TermsEnum liveTerms[] = new TermsEnum[dvs.length];
     long[] weights = new long[liveTerms.length];
-    for (int sub=0;sub<numReaders;sub++) {
+    for (int sub = 0; sub < numReaders; sub++) {
       SortedDocValues dv = dvs[sub];
       Bits liveDocs = mergeState.liveDocs[sub];
       if (liveDocs == null) {
@@ -619,118 +634,122 @@ public abstract class DocValuesConsumer implements Closeable {
         weights[sub] = bitset.cardinality();
       }
     }
-    
+
     // step 2: create ordinal map (this conceptually does the "merging")
     final OrdinalMap map = OrdinalMap.build(null, liveTerms, weights, PackedInts.COMPACT);
-    
+
     // step 3: add field
-    addSortedField(fieldInfo,
-                   new EmptyDocValuesProducer() {
-                     @Override
-                     public SortedDocValues getSorted(FieldInfo fieldInfoIn) throws IOException {
-                       if (fieldInfoIn != fieldInfo) {
-                         throw new IllegalArgumentException("wrong FieldInfo");
-                       }
-
-                       // We must make new iterators + DocIDMerger for each iterator:
-
-                       List<SortedDocValuesSub> subs = new ArrayList<>();
-                       long cost = 0;
-                       for (int i=0;i<mergeState.docValuesProducers.length;i++) {
-                         SortedDocValues values = null;
-                         DocValuesProducer docValuesProducer = mergeState.docValuesProducers[i];
-                         if (docValuesProducer != null) {
-                           FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(fieldInfo.name);
-                           if (readerFieldInfo != null && readerFieldInfo.getDocValuesType() == DocValuesType.SORTED) {
-                             values = docValuesProducer.getSorted(readerFieldInfo);
-                           }
-                         }
-                         if (values == null) {
-                           values = DocValues.emptySorted();
-                         }
-                         cost += values.cost();
-                         
-                         subs.add(new SortedDocValuesSub(mergeState.docMaps[i], values, map.getGlobalOrds(i)));
-                       }
-
-                       final long finalCost = cost;
-
-                       final DocIDMerger<SortedDocValuesSub> docIDMerger = DocIDMerger.of(subs, mergeState.needsIndexSort);
-                       
-                       return new SortedDocValues() {
-                         private int docID = -1;
-                         private int ord;
-
-                         @Override
-                         public int docID() {
-                           return docID;
-                         }
-
-                         @Override
-                         public int nextDoc() throws IOException {
-                           SortedDocValuesSub sub = docIDMerger.next();
-                           if (sub == null) {
-                             return docID = NO_MORE_DOCS;
-                           }
-                           int subOrd = sub.values.ordValue();
-                           assert subOrd != -1;
-                           ord = (int) sub.map.get(subOrd);
-                           docID = sub.mappedDocID;
-                           return docID;
-                         }
-
-                         @Override
-                         public int ordValue() {
-                           return ord;
-                         }
-                         
-                         @Override
-                         public int advance(int target) {
-                           throw new UnsupportedOperationException();
-                         }
-
-                         @Override
-                         public boolean advanceExact(int target) throws IOException {
-                           throw new UnsupportedOperationException();
-                         }
-
-                         @Override
-                         public long cost() {
-                           return finalCost;
-                         }
-
-                         @Override
-                         public int getValueCount() {
-                           return (int) map.getValueCount();
-                         }
-                         
-                         @Override
-                         public BytesRef lookupOrd(int ord) throws IOException {
-                           int segmentNumber = map.getFirstSegmentNumber(ord);
-                           int segmentOrd = (int) map.getFirstSegmentOrd(ord);
-                           return dvs[segmentNumber].lookupOrd(segmentOrd);
-                         }
-
-                         @Override
-                         public TermsEnum termsEnum() throws IOException {
-                           TermsEnum[] subs = new TermsEnum[toMerge.size()];
-                           for (int sub = 0; sub < subs.length; ++sub) {
-                             subs[sub] = toMerge.get(sub).termsEnum();
-                           }
-                           return new MergedTermsEnum(map, subs);
-                         }
-                       };
-                     }
-                   });
+    addSortedField(
+        fieldInfo,
+        new EmptyDocValuesProducer() {
+          @Override
+          public SortedDocValues getSorted(FieldInfo fieldInfoIn) throws IOException {
... 304282 lines suppressed ...
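
The DocValuesConsumer changes above reformat the javadoc that defines the pull-style lifecycle: each add*Field call receives a DocValuesProducer and pulls iterators from it, possibly multiple times. A minimal sketch of that contract, not part of this diff; the logging consumer is illustrative, and a real codec implementation would encode the values rather than print them and would implement the remaining field types.

import java.io.IOException;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;

public class LoggingDocValuesConsumer extends DocValuesConsumer {

  @Override
  public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer)
      throws IOException {
    // "Pull" API: ask the producer for a fresh iterator and walk it once.
    NumericDocValues values = valuesProducer.getNumeric(field);
    for (int doc = values.nextDoc();
        doc != DocIdSetIterator.NO_MORE_DOCS;
        doc = values.nextDoc()) {
      System.out.println(field.name + " doc=" + doc + " value=" + values.longValue());
    }
  }

  @Override
  public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) {
    throw new UnsupportedOperationException();
  }

  @Override
  public void close() {}
}
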


[lucene-solr] 02/02: LUCENE-9570: code reformatting [record rev].

Posted by dw...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

dweiss pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 8c234b28791db246068ed3f3f43b7acf83f2c731
Author: Dawid Weiss <da...@carrotsearch.com>
AuthorDate: Wed Dec 23 12:41:50 2020 +0100

    LUCENE-9570: code reformatting [record rev].
---
 .git-blame-ignore-revs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index ad6485a..34b1522 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,2 +1,3 @@
 # LUCENE-9570 [google java formatter applied]
 6faa4f98e04
+2d6ad2fee6d
\ No newline at end of file